Program freezes when using the argparse and concurrent.futures packages

Problem description

I am writing a Python program to process NGS sequencing data in an HPC bash terminal. The program runs fine, with either a single process or multiple processes, in a Jupyter notebook on my Mac. However, as soon as I try to pass arguments on the command line with the argparse package, the program never gives me the final result; instead it runs indefinitely, as if the work never finishes. I have checked and am almost certain it is caused by some conflict between argparse and concurrent.futures.ProcessPoolExecutor(). Could anyone suggest how to fix this? Thanks!

The following code produces the freezing problem when run from the terminal.

#! /usr/bin/env python

import pandas as pd
import time
import concurrent.futures
import argparse


def run(args):
    start = time.perf_counter()
    input_file = args.input
    output_file = args.output
    chunk = args.chunk_size

    def cal_breaking(data):
        for index, row in data.iterrows():
            if row[1] == 0:  # mapping to the forward strand
                data.at[index, 'breaking_pos'] = int(row[5]) + int(row[3])
            elif row[1] == 16:  # mapping to the reverse strand
                data.at[index, 'breaking_pos'] = int(row[3])
            else:
                pass
        return data

    new_df = pd.DataFrame(
        columns=['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL'])
    processes = []
    for df in pd.read_csv(input_file, delimiter='\t', usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], chunksize=chunk):
        df.columns = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL']
        df = df.loc[~df['CIGAR'].str.contains('S') & ~df['CIGAR'].str.contains(
            'H')]  # filter out reads whose CIGAR contains soft-clip ('S') or hard-clip ('H') operations
        df['CIGAR'] = df.iloc[:, 5].str.extract(
            r'(\d+)')  # \d+ matches one or more digits (0-9)
        df['breaking_pos'] = None
        with concurrent.futures.ProcessPoolExecutor() as executor:
            processes.append(executor.submit(cal_breaking, df))
    for process in processes:
        new_df = pd.concat([new_df, process.result()], sort=True)

    new_df['count'] = 1
    new_df = new_df.groupby(['RNAME', 'breaking_pos']).count()['count'].reset_index()
    new_df['end'] = new_df['breaking_pos'] + 1
    new_df = new_df[['RNAME', 'breaking_pos', 'end', 'count']]
    new_df.to_csv(output_file, '\t', index=None, header=None)
    end = time.perf_counter()
    print(f'process finished in {round(end - start, 2)} second(s)')


def main():
    parser = argparse.ArgumentParser(description="tagging HiC-Pro pair's sub-compartment")
    parser.add_argument("-in", help="input pairs file", dest="input", type=str, required=True)
    parser.add_argument("-out", help="output files name", dest="output", type=str, required=True)
    parser.add_argument("-ck", help="read in chunk size", dest="chunk_size", type=int, required=True)
    parser.set_defaults(func=run)
    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()

If I do not use multiprocessing, the following code runs fine from the terminal with no problems:

#! /usr/bin/env python

import pandas as pd
import time
import argparse


def run(args):
    start = time.perf_counter()
    input_file = args.input
    output_file = args.output
    chunk = args.chunk_size

    def cal_breaking(data):
        for index, row in data.iterrows():
            if row[1] == 0:  # mapping to the forward strand
                data.at[index, 'breaking_pos'] = int(row[5]) + int(row[3])
            elif row[1] == 16:  # mapping to the reverse strand
                data.at[index, 'breaking_pos'] = int(row[3])
            else:
                pass
        return data

    new_df = pd.DataFrame(
        columns=['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL'])

    for df in pd.read_csv(input_file, delimiter='\t', usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], chunksize=chunk):
        df.columns = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL']
        df = df.loc[~df['CIGAR'].str.contains('S') & ~df['CIGAR'].str.contains(
            'H')]  # filter out reads whose CIGAR contains soft-clip ('S') or hard-clip ('H') operations
        df['CIGAR'] = df.iloc[:, 5].str.extract(
            r'(\d+)')  # \d+ matches one or more digits (0-9)
        df['breaking_pos'] = None
        new_df = pd.concat([new_df, cal_breaking(df)], sort=True)

    new_df['count'] = 1
    new_df = new_df.groupby(['RNAME', 'breaking_pos']).count()['count'].reset_index()
    new_df['end'] = new_df['breaking_pos'] + 1
    new_df = new_df[['RNAME', 'breaking_pos', 'end', 'count']]
    new_df.to_csv(output_file, '\t', index=None, header=None)
    end = time.perf_counter()
    print(f'process finished in {round(end - start, 2)} second(s)')


def main():
    parser = argparse.ArgumentParser(description="tagging HiC-Pro pair's sub-compartment")
    parser.add_argument("-in", help="input pairs file", dest="input", type=str, required=True)
    parser.add_argument("-out", help="output files name", dest="output", type=str, required=True)
    parser.add_argument("-ck", help="read in chunk size", dest="chunk_size", type=int, required=True)
    parser.set_defaults(func=run)
    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()

Tags: python, multiprocessing, argparse

Solution


The ProcessPoolExecutor class is an Executor subclass that uses a pool of processes to execute calls asynchronously. ProcessPoolExecutor uses the multiprocessing module, which allows it to side-step the Global Interpreter Lock, but it also means that only picklable objects can be executed and returned.
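
One practical consequence of the picklability requirement, and worth noting for the code above, is that cal_breaking is defined inside run(): locally defined (nested) functions cannot be pickled, so they cannot be handed to worker processes. A minimal sketch (my own illustration, not from the original post) demonstrating the difference:

#! /usr/bin/env python
# Illustration of the picklability requirement: callables submitted to a
# ProcessPoolExecutor are serialized with pickle before being sent to the
# worker processes, and a function defined inside another function cannot
# be serialized that way.
import pickle


def module_level(x):
    # Defined at module level -> picklable, safe to submit to an executor.
    return x * 2


def make_nested():
    def nested(x):
        # Defined inside another function -> not picklable.
        return x * 2
    return nested


if __name__ == "__main__":
    pickle.dumps(module_level)  # works
    try:
        pickle.dumps(make_nested())
    except (pickle.PicklingError, AttributeError) as exc:
        # Typically fails with something like:
        # "Can't pickle local object 'make_nested.<locals>.nested'"
        print(f"cannot pickle nested function: {exc}")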

According to the documentation, on Windows max_workers must be less than or equal to 61, and when max_workers is None the default chosen is capped at 61 even if more processors are available. Here I modified this part to make it work:

with concurrent.futures.ProcessPoolExecutor(max_workers=6) as executor:
    processes.append(executor.submit(cal_breaking, df))
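
Building on the picklability point, here is a fuller restructuring sketch (my own assumption about how the script could be reorganized, not part of the original answer): cal_breaking is moved to module level so it can be pickled, and a single executor with max_workers=6 is created once and reused for every chunk, instead of being created and shut down inside the loop.

#! /usr/bin/env python
# Sketch only: same processing logic as the question, reorganized so that the
# worker function is importable (and therefore picklable) and the pool is
# created a single time.

import argparse
import concurrent.futures

import pandas as pd

COLUMNS = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR',
           'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL']


def cal_breaking(data):
    # Same per-row logic as in the question, now defined at module level.
    for index, row in data.iterrows():
        if row['FLAG'] == 0:     # mapped to the forward strand
            data.at[index, 'breaking_pos'] = int(row['CIGAR']) + int(row['POS'])
        elif row['FLAG'] == 16:  # mapped to the reverse strand
            data.at[index, 'breaking_pos'] = int(row['POS'])
    return data


def run(args):
    futures = []
    with concurrent.futures.ProcessPoolExecutor(max_workers=6) as executor:
        for df in pd.read_csv(args.input, delimiter='\t',
                              usecols=list(range(11)), chunksize=args.chunk_size):
            df.columns = COLUMNS
            # Drop reads whose CIGAR contains soft clips ('S') or hard clips ('H').
            df = df.loc[~df['CIGAR'].str.contains('S') & ~df['CIGAR'].str.contains('H')]
            df['CIGAR'] = df['CIGAR'].str.extract(r'(\d+)', expand=False)  # leading digits only
            df['breaking_pos'] = None
            futures.append(executor.submit(cal_breaking, df))
        results = [f.result() for f in futures]

    new_df = pd.concat(results, sort=True)
    new_df['count'] = 1
    new_df = new_df.groupby(['RNAME', 'breaking_pos']).count()['count'].reset_index()
    new_df['end'] = new_df['breaking_pos'] + 1
    new_df = new_df[['RNAME', 'breaking_pos', 'end', 'count']]
    new_df.to_csv(args.output, sep='\t', index=False, header=False)


def main():
    parser = argparse.ArgumentParser(description="tagging HiC-Pro pair's sub-compartment")
    parser.add_argument("-in", dest="input", type=str, required=True, help="input pairs file")
    parser.add_argument("-out", dest="output", type=str, required=True, help="output file name")
    parser.add_argument("-ck", dest="chunk_size", type=int, required=True, help="read-in chunk size")
    args = parser.parse_args()
    run(args)


if __name__ == "__main__":
    main()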
