diff --git a/mcsm/mcsm.py b/mcsm/mcsm.py
index 3da5ce6..8a6388d 100644
--- a/mcsm/mcsm.py
+++ b/mcsm/mcsm.py
@@ -124,9 +124,9 @@ def scrape_results(result_url):
         print('FAIL: Could not fetch results'
               , '\nCheck if url is valid')
 
     # extract results using the html parser
-    soup = BeautifulSoup(result_response.text, features = 'html.parser')
+    soup = BeautifulSoup(result_response.text, features = 'html.parser')
     # print(soup)
-    web_result_raw = soup.find(class_ = 'span4').get_text()
+    web_result_raw = soup.find(class_ = 'span4').get_text()
 
     return web_result_raw
@@ -156,11 +156,10 @@ def build_result_dict(web_result_raw):
     result_dict = {}
     for line in mytext.split('\n'):
         fields = line.split(':')
-        # print(fields)
+        #print(fields)
         if len(fields) > 1: # since Mutaton information is empty
-            dict_entry = dict([(x, y) for x, y in zip(fields[::2], fields[1::2])])
-            result_dict.update(dict_entry)
-
+            dict_entry = dict([(x, y) for x, y in zip(fields[::2], fields[1::2])])
+            result_dict.update(dict_entry)
     return result_dict
 #%%
 #=======================================================================
diff --git a/mcsm/mcsm_wrapper.py b/mcsm/mcsm_wrapper.py
index 795aa27..1ca8069 100755
--- a/mcsm/mcsm_wrapper.py
+++ b/mcsm/mcsm_wrapper.py
@@ -63,7 +63,8 @@ outfile_format = outdir + '/' + out_filename_format
 #%%=====================================================================
 def submit_mcsm():
     my_chain = 'A'
-    my_ligand_id = 'DCS' # FIXME
+#    my_ligand_id = 'DCS' # FIXME
+    my_ligand_id = 'RMP' # FIXME
     my_affinity = 10
 
     print('Result urls and error file (if any) will be written in: ', outdir)
@@ -88,6 +89,7 @@ def submit_mcsm():
           , 'minutes, but will be longer for more mutations.')
 
 #%%=====================================================================
 def get_results():
+    output_df = pd.DataFrame()
     url_counter = 1 # HURR DURR COUNT STARTEDS AT ONE1`!1
     infile_len = os.popen('wc -l < %s' % result_urls).read() # quicker than using Python :-) #FIXME filenme (infile_urls)
@@ -97,7 +99,6 @@ def get_results():
     with open(result_urls, 'r') as urlfile:
         for line in urlfile:
             url_line = line.strip()
-
             # call functions
             results_interim = scrape_results(url_line)
             result_dict = build_result_dict(results_interim)
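
For reference, below is a minimal standalone sketch of the zip-based key/value pairing that build_result_dict relies on (the dict_entry lines re-indented in the first hunk). The sample text and labels are hypothetical, not actual mCSM output.

# Sketch of the 'Label: value' pairing used in build_result_dict.
# The sample string is made up for illustration only.
sample = (
    "Predicted Stability Change (DDG): -1.2 Kcal/mol\n"
    "\n"
    "Outcome: Destabilizing"
)

result_dict = {}
for line in sample.split('\n'):
    fields = line.split(':')          # 'Label: value' -> ['Label', ' value']
    if len(fields) > 1:               # skip lines without a value
        # even slots become keys, odd slots become values
        result_dict.update(dict(zip(fields[::2], fields[1::2])))

print(result_dict)
# {'Predicted Stability Change (DDG)': ' -1.2 Kcal/mol', 'Outcome': ' Destabilizing'}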