Compare commits: Experiment...main (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 85aadeb996 |  |

.gitignore (vendored, 21 changes)
@@ -1,16 +1,7 @@
-# 1. Broad Ignores
-# Ignore all contents of these directories
-/Data/*
-/attach/*
-/results/*
-/enarcelona/*
-.env
-__pycache__/
-*.pyc
-
-# 2. Ignore virtual environments COMPLETELY
-# This must come BEFORE the unignore rule
-env*/
-# 3. The "Unignore" rule (Whitelisting)
-# We only unignore .py files that aren't already blocked by the rules above
 !**/*.py
+
+/Data/
+/attach/
+/results/
+/enarcelona/
+.env

@@ -1,570 +0,0 @@
# %% Scatter
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# Load your data from TSV file
file_path = '/home/shahin/Lab/Doktorarbeit/Barcelona/Data/join_MS_Briefe_400_with_unique_id_SHA3_explore_cleaned_results+MS_Briefe_400_with_unique_id_SHA3_explore_cleaned.tsv'
df = pd.read_csv(file_path, sep='\t')

# Replace comma with dot for numeric conversion in GT_EDSS and LLM_Results
df['GT_EDSS'] = df['GT_EDSS'].astype(str).str.replace(',', '.')
df['LLM_Results'] = df['LLM_Results'].astype(str).str.replace(',', '.')

# Convert to float (handle invalid entries gracefully)
df['GT_EDSS'] = pd.to_numeric(df['GT_EDSS'], errors='coerce')
df['LLM_Results'] = pd.to_numeric(df['LLM_Results'], errors='coerce')

# Drop rows where either column is NaN
df_clean = df.dropna(subset=['GT_EDSS', 'LLM_Results'])

# Create scatter plot
plt.figure(figsize=(8, 6))
plt.scatter(df_clean['GT_EDSS'], df_clean['LLM_Results'], alpha=0.7, color='blue')

# Add labels and title
plt.xlabel('GT_EDSS')
plt.ylabel('LLM_Results')
plt.title('Comparison of GT_EDSS vs LLM_Results')

# Optional: add a diagonal reference line (perfect prediction)
plt.plot([0, max(df_clean['GT_EDSS'])], [0, max(df_clean['GT_EDSS'])], color='red', linestyle='--', label='Perfect Prediction')
plt.legend()

# Show plot
plt.grid(True)
plt.tight_layout()
plt.show()

##
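
# %% Scatter: agreement summary (editor's sketch, not part of the original commit)
# A minimal, hypothetical companion to the scatter cell above: numeric
# agreement metrics for the same data. Assumes df_clean from the previous
# cell is still in scope.
gt = df_clean['GT_EDSS'].to_numpy()
pred = df_clean['LLM_Results'].to_numpy()

mae = np.mean(np.abs(gt - pred))                       # mean absolute error
r = np.corrcoef(gt, pred)[0, 1]                        # Pearson correlation
within_half = np.mean(np.abs(gt - pred) <= 0.5) * 100  # % within ±0.5 EDSS points

print(f"MAE: {mae:.3f}")
print(f"Pearson r: {r:.3f}")
print(f"Within ±0.5 points: {within_half:.1f}%")

##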

# %% Bland-Altman
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm

# Load your data from TSV file
file_path = '/home/shahin/Lab/Doktorarbeit/Barcelona/Data/join_MS_Briefe_400_with_unique_id_SHA3_explore_cleaned_results+MS_Briefe_400_with_unique_id_SHA3_explore_cleaned.tsv'
df = pd.read_csv(file_path, sep='\t')

# Replace comma with dot for numeric conversion in GT_EDSS and LLM_Results
df['GT_EDSS'] = df['GT_EDSS'].astype(str).str.replace(',', '.')
df['LLM_Results'] = df['LLM_Results'].astype(str).str.replace(',', '.')

# Convert to float (handle invalid entries gracefully)
df['GT_EDSS'] = pd.to_numeric(df['GT_EDSS'], errors='coerce')
df['LLM_Results'] = pd.to_numeric(df['LLM_Results'], errors='coerce')

# Drop rows where either column is NaN
df_clean = df.dropna(subset=['GT_EDSS', 'LLM_Results'])

# Create Bland-Altman plot
f, ax = plt.subplots(1, figsize=(8, 5))
sm.graphics.mean_diff_plot(df_clean['GT_EDSS'], df_clean['LLM_Results'], ax=ax)

# Add labels and title
ax.set_title('Bland-Altman Plot: GT_EDSS vs LLM_Results')
ax.set_xlabel('Mean of GT_EDSS and LLM_Results')
ax.set_ylabel('Difference between GT_EDSS and LLM_Results')

# Display Bland-Altman plot
plt.tight_layout()
plt.show()

# Print some statistics
mean_diff = np.mean(df_clean['GT_EDSS'] - df_clean['LLM_Results'])
std_diff = np.std(df_clean['GT_EDSS'] - df_clean['LLM_Results'])
print(f"Mean difference: {mean_diff:.3f}")
print(f"Standard deviation of differences: {std_diff:.3f}")
print(f"95% Limits of Agreement: [{mean_diff - 1.96*std_diff:.3f}, {mean_diff + 1.96*std_diff:.3f}]")

##
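
# %% Bland-Altman without statsmodels (editor's sketch, not part of the original commit)
# The cell above relies on statsmodels' mean_diff_plot; a minimal
# matplotlib-only equivalent, assuming df_clean from that cell:
mean_vals = (df_clean['GT_EDSS'] + df_clean['LLM_Results']) / 2
diffs = df_clean['GT_EDSS'] - df_clean['LLM_Results']
md, sd = diffs.mean(), diffs.std()

plt.figure(figsize=(8, 5))
plt.scatter(mean_vals, diffs, alpha=0.6)
plt.axhline(md, color='gray', linestyle='-', label='Mean difference')
plt.axhline(md + 1.96 * sd, color='gray', linestyle='--', label='±1.96 SD')
plt.axhline(md - 1.96 * sd, color='gray', linestyle='--')
plt.xlabel('Mean of GT_EDSS and LLM_Results')
plt.ylabel('Difference (GT - LLM)')
plt.legend()
plt.tight_layout()
plt.show()

##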

# %% Confusion matrix
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns

# Load your data from TSV file
file_path = '/home/shahin/Lab/Doktorarbeit/Barcelona/Data/Join_edssandsub.tsv'
df = pd.read_csv(file_path, sep='\t')

# Replace comma with dot for numeric conversion in GT.EDSS and result.EDSS
df['GT.EDSS'] = df['GT.EDSS'].astype(str).str.replace(',', '.')
df['result.EDSS'] = df['result.EDSS'].astype(str).str.replace(',', '.')

# Convert to float (handle invalid entries gracefully)
df['GT.EDSS'] = pd.to_numeric(df['GT.EDSS'], errors='coerce')
df['result.EDSS'] = pd.to_numeric(df['result.EDSS'], errors='coerce')

# Drop rows where either column is NaN
df_clean = df.dropna(subset=['GT.EDSS', 'result.EDSS'])

# For the confusion matrix we need to categorize the values
# into bins up to 10 (0-1, 1-2, 2-3, ..., 9-10)
def categorize_edss(value):
    if pd.isna(value):
        return np.nan
    elif value <= 1.0:
        return '0-1'
    elif value <= 2.0:
        return '1-2'
    elif value <= 3.0:
        return '2-3'
    elif value <= 4.0:
        return '3-4'
    elif value <= 5.0:
        return '4-5'
    elif value <= 6.0:
        return '5-6'
    elif value <= 7.0:
        return '6-7'
    elif value <= 8.0:
        return '7-8'
    elif value <= 9.0:
        return '8-9'
    elif value <= 10.0:
        return '9-10'
    else:
        return '10+'

# Create categorical versions
df_clean['GT.EDSS_cat'] = df_clean['GT.EDSS'].apply(categorize_edss)
df_clean['result.EDSS_cat'] = df_clean['result.EDSS'].apply(categorize_edss)

# Remove any NaN categories
df_clean = df_clean.dropna(subset=['GT.EDSS_cat', 'result.EDSS_cat'])

# Create confusion matrix
cm = confusion_matrix(df_clean['GT.EDSS_cat'], df_clean['result.EDSS_cat'],
                      labels=['0-1', '1-2', '2-3', '3-4', '4-5', '5-6', '6-7', '7-8', '8-9', '9-10'])

# Plot confusion matrix
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=['0-1', '1-2', '2-3', '3-4', '4-5', '5-6', '6-7', '7-8', '8-9', '9-10'],
            yticklabels=['0-1', '1-2', '2-3', '3-4', '4-5', '5-6', '6-7', '7-8', '8-9', '9-10'])
plt.title('Confusion Matrix: Ground Truth EDSS vs Inferred EDSS (Categorized 0-10)')
plt.xlabel('LLM Generated EDSS')
plt.ylabel('Ground Truth EDSS')
plt.tight_layout()
plt.show()

# Print classification report
print("Classification Report:")
print(classification_report(df_clean['GT.EDSS_cat'], df_clean['result.EDSS_cat']))

# Print raw counts
print("\nConfusion Matrix (Raw Counts):")
print(cm)

##
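
# %% Vectorized binning (editor's sketch, not part of the original commit)
# categorize_edss above walks an if/elif chain per value; pd.cut expresses
# the same right-closed bins (-inf, 1], (1, 2], ..., (9, 10] in one call.
# Note: values above 10 become NaN here instead of '10+'. Assumes df_clean
# from the confusion-matrix cell.
bins = [-np.inf, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
bin_labels = ['0-1', '1-2', '2-3', '3-4', '4-5', '5-6', '6-7', '7-8', '8-9', '9-10']
gt_cat = pd.cut(df_clean['GT.EDSS'], bins=bins, labels=bin_labels)
pred_cat = pd.cut(df_clean['result.EDSS'], bins=bins, labels=bin_labels)

##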

# %% Classification
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
import numpy as np

# Load your data from TSV file
file_path = '/home/shahin/Lab/Doktorarbeit/Barcelona/Data/Join_edssandsub.tsv'
df = pd.read_csv(file_path, sep='\t')

# Check data structure
print("Data shape:", df.shape)
print("First few rows:")
print(df.head())
print("\nColumn names:")
for col in df.columns:
    print(f"  {col}")

# Function to safely convert to boolean
def safe_bool_convert(series):
    '''Safely convert a series to boolean, handling various input formats.'''
    # Convert to string first, then map to boolean
    series_str = series.astype(str).str.strip().str.lower()

    # Handle different true/false representations
    bool_map = {
        'true': True, '1': True, 'yes': True, 'y': True,
        'false': False, '0': False, 'no': False, 'n': False
    }

    converted = series_str.map(bool_map)

    # Handle remaining NaN values
    converted = converted.fillna(False)  # or True, depending on your preference

    return converted

# Convert columns safely
if 'result.klassifizierbar' in df.columns:
    print("\nresult.klassifizierbar column info:")
    print(df['result.klassifizierbar'].head(10))
    print("Unique values:", df['result.klassifizierbar'].unique())

    df['result.klassifizierbar'] = safe_bool_convert(df['result.klassifizierbar'])
    print("After conversion:")
    print(df['result.klassifizierbar'].value_counts())

if 'GT.klassifizierbar' in df.columns:
    print("\nGT.klassifizierbar column info:")
    print(df['GT.klassifizierbar'].head(10))
    print("Unique values:", df['GT.klassifizierbar'].unique())

    df['GT.klassifizierbar'] = safe_bool_convert(df['GT.klassifizierbar'])
    print("After conversion:")
    print(df['GT.klassifizierbar'].value_counts())

# Create bar chart showing only True values for klassifizierbar
if 'result.klassifizierbar' in df.columns and 'GT.klassifizierbar' in df.columns:
    # Get counts for True values only
    llm_true_count = df['result.klassifizierbar'].sum()
    gt_true_count = df['GT.klassifizierbar'].sum()

    # Plot using matplotlib directly
    fig, ax = plt.subplots(figsize=(8, 6))

    x = np.arange(2)
    width = 0.35

    # One bar per group, centered on its tick so the labels line up
    bars1 = ax.bar(x[0], llm_true_count, width, label='LLM', color='skyblue', alpha=0.8)
    bars2 = ax.bar(x[1], gt_true_count, width, label='GT', color='lightcoral', alpha=0.8)

    # Add value labels on bars
    ax.annotate(f'{llm_true_count}',
                xy=(x[0], llm_true_count),
                xytext=(0, 3),
                textcoords="offset points",
                ha='center', va='bottom')

    ax.annotate(f'{gt_true_count}',
                xy=(x[1], gt_true_count),
                xytext=(0, 3),
                textcoords="offset points",
                ha='center', va='bottom')

    ax.set_xlabel('Classification Status (klassifizierbar)')
    ax.set_ylabel('Count')
    ax.set_title('True Values Comparison: LLM vs GT for "klassifizierbar"')
    ax.set_xticks(x)
    ax.set_xticklabels(['LLM', 'GT'])
    ax.legend()

    plt.tight_layout()
    plt.show()

# Create confusion matrix if both columns exist
if 'result.klassifizierbar' in df.columns and 'GT.klassifizierbar' in df.columns:
    try:
        # Ensure both columns are boolean
        llm_bool = df['result.klassifizierbar'].fillna(False).astype(bool)
        gt_bool = df['GT.klassifizierbar'].fillna(False).astype(bool)

        cm = confusion_matrix(gt_bool, llm_bool)

        # Plot confusion matrix
        fig, ax = plt.subplots(figsize=(8, 6))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                    xticklabels=['False', 'True'],
                    yticklabels=['False', 'True'],
                    ax=ax)
        ax.set_xlabel('LLM Predictions')
        ax.set_ylabel('GT Labels')
        ax.set_title('Confusion Matrix: LLM vs GT for "klassifizierbar"')

        plt.tight_layout()
        plt.show()

        print("Confusion Matrix:")
        print(cm)

    except Exception as e:
        print(f"Error creating confusion matrix: {e}")

# Show final data info
print("\nFinal DataFrame info:")
print(df[['result.klassifizierbar', 'GT.klassifizierbar']].info())

##
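
# %% Binary metrics from the 2x2 matrix (editor's sketch, not part of the original commit)
# Sensitivity, specificity and accuracy for the "klassifizierbar"
# comparison, assuming cm from the previous cell was built successfully.
# sklearn orders boolean labels [False, True], so cm = [[TN, FP], [FN, TP]].
tn, fp, fn, tp = cm.ravel()
sensitivity = tp / (tp + fn) if (tp + fn) else float('nan')
specificity = tn / (tn + fp) if (tn + fp) else float('nan')
accuracy = (tp + tn) / cm.sum()
print(f"Sensitivity: {sensitivity:.3f}  Specificity: {specificity:.3f}  Accuracy: {accuracy:.3f}")

##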

# %% Boxplot
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

# Load your data from TSV file
file_path = '/home/shahin/Lab/Doktorarbeit/Barcelona/Data/join_results_unique.tsv'
df = pd.read_csv(file_path, sep='\t')

# Replace comma with dot for numeric conversion in GT.EDSS and result.EDSS
df['GT.EDSS'] = df['GT.EDSS'].astype(str).str.replace(',', '.')
df['result.EDSS'] = df['result.EDSS'].astype(str).str.replace(',', '.')

# Convert to float (handle invalid entries gracefully)
df['GT.EDSS'] = pd.to_numeric(df['GT.EDSS'], errors='coerce')
df['result.EDSS'] = pd.to_numeric(df['result.EDSS'], errors='coerce')

# Drop rows where either column is NaN
df_clean = df.dropna(subset=['GT.EDSS', 'result.EDSS'])

# 1. DEFINE CATEGORY ORDER
# This ensures the x-axis is numerically logical (0-1 comes before 1-2)
category_order = ['0-1', '1-2', '2-3', '3-4', '4-5', '5-6', '6-7', '7-8', '8-9', '9-10', '10+']

# Convert the column to a Categorical type with the specified order
# (categorize_edss is defined in the "Confusion matrix" cell above;
# run that cell first)
df_clean['GT.EDSS_cat'] = pd.Categorical(df_clean['GT.EDSS'].apply(categorize_edss),
                                         categories=category_order,
                                         ordered=True)

plt.figure(figsize=(14, 8))

# 2. ADD HUE FOR LEGEND
# Assigning x to 'hue' lets seaborn generate a legend automatically
box_plot = sns.boxplot(
    data=df_clean,
    x='GT.EDSS_cat',
    y='result.EDSS',
    hue='GT.EDSS_cat',  # added hue
    palette='viridis',
    linewidth=1.5,
    legend=True  # ensure the legend is enabled
)

# 3. CUSTOMIZE PLOT
plt.title('Distribution of result.EDSS by GT.EDSS Category', fontsize=18, pad=20)
plt.xlabel('Ground Truth EDSS Category', fontsize=14)
plt.ylabel('LLM Predicted EDSS', fontsize=14)

# Move the legend to the side
plt.legend(title="EDSS Categories", bbox_to_anchor=(1.05, 1), loc='upper left')

plt.xticks(rotation=45, ha='right', fontsize=10)
plt.grid(True, axis='y', alpha=0.3)
plt.tight_layout()

plt.show()

##
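
# %% Per-category summary (editor's sketch, not part of the original commit)
# Count, median and quartiles of the predicted EDSS per ground-truth bin,
# as a numeric companion to the boxplot above. Assumes df_clean from that cell.
summary = (df_clean
           .groupby('GT.EDSS_cat', observed=True)['result.EDSS']
           .describe()[['count', '25%', '50%', '75%']])
print(summary)

##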

# %% Postprocessing column names

import pandas as pd

# Read the TSV file
file_path = '/home/shahin/Lab/Doktorarbeit/Barcelona/Data/Join_edssandsub.tsv'
df = pd.read_csv(file_path, sep='\t')

# Create a mapping dictionary for German to English column names
column_mapping = {
    'EDSS': 'GT.EDSS',
    'klassifizierbar': 'GT.klassifizierbar',
    'Sehvermögen': 'GT.VISUAL_OPTIC_FUNCTIONS',
    'Cerebellum': 'GT.CEREBELLAR_FUNCTIONS',
    'Hirnstamm': 'GT.BRAINSTEM_FUNCTIONS',
    'Sensibiliät': 'GT.SENSORY_FUNCTIONS',
    'Pyramidalmotorik': 'GT.PYRAMIDAL_FUNCTIONS',
    'Ambulation': 'GT.AMBULATION',
    'Cerebrale_Funktion': 'GT.CEREBRAL_FUNCTIONS',
    'Blasen-_und_Mastdarmfunktion': 'GT.BOWEL_AND_BLADDER_FUNCTIONS'
}

# Rename columns
df = df.rename(columns=column_mapping)

# Save the modified dataframe back to the TSV file
df.to_csv(file_path, sep='\t', index=False)

print("Columns have been successfully renamed!")
print("Renamed columns:")
for old_name, new_name in column_mapping.items():
    # After the rename the old names are gone, so check for the new ones
    if new_name in df.columns:
        print(f"  {old_name} -> {new_name}")

##
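
# %% Rename verification (editor's sketch, not part of the original commit)
# The cell above overwrites the TSV in place. A hypothetical check that
# re-reads the file and confirms every target name is now present:
df_check = pd.read_csv(file_path, sep='\t')
missing = [new for new in column_mapping.values() if new not in df_check.columns]
print("Missing after rename:", missing if missing else "none")

##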

# %% Styled table
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Load data
df = pd.read_csv("/home/shahin/Lab/Doktorarbeit/Barcelona/Data/Join_edssandsub.tsv", sep='\t')

# 1. Identify all GT and result columns
gt_columns = [col for col in df.columns if col.startswith('GT.')]
result_columns = [col for col in df.columns if col.startswith('result.')]

print("GT Columns found:", gt_columns)
print("Result Columns found:", result_columns)

# 2. Create a proper mapping between GT and result columns,
# handling various naming conventions (spaces, underscores, etc.)
column_mapping = {}

for gt_col in gt_columns:
    base_name = gt_col.replace('GT.', '')

    # Try different matching approaches
    candidates = [
        f'result.{base_name}',                    # exact match
        f'result.{base_name.replace(" ", "_")}',  # with underscores
        f'result.{base_name.replace("_", " ")}',  # with spaces
        f'result.{base_name.replace(" ", "")}',   # no spaces
        f'result.{base_name.replace("_", "")}'    # no underscores
    ]

    # Also try case-insensitive matching
    candidates.append(f'result.{base_name.lower()}')
    candidates.append(f'result.{base_name.upper()}')

    # Try to find a matching result column
    matched = False
    for candidate in candidates:
        if candidate in result_columns:
            column_mapping[gt_col] = candidate
            matched = True
            break

    # If no exact match was found, try partial matching
    if not matched:
        # Match by removing special characters and comparing
        base_clean = ''.join(e for e in base_name if e.isalnum() or e in ['_', ' '])
        for result_col in result_columns:
            result_base = result_col.replace('result.', '')
            result_clean = ''.join(e for e in result_base if e.isalnum() or e in ['_', ' '])
            if base_clean.lower() == result_clean.lower():
                column_mapping[gt_col] = result_col
                matched = True
                break

print("Column mapping:", column_mapping)

# 3. Faster, vectorized computation using the corrected mapping
data_list = []

for gt_col, result_col in column_mapping.items():
    print(f"Processing {gt_col} vs {result_col}")

    # Convert to numeric, forcing errors to NaN
    s1 = pd.to_numeric(df[gt_col], errors='coerce').astype(float)
    s2 = pd.to_numeric(df[result_col], errors='coerce').astype(float)

    # Calculate matches (absolute difference <= 0.5)
    diff = np.abs(s1 - s2)
    matches = (diff <= 0.5).sum()

    # Determine the denominator (total valid comparisons)
    valid_count = diff.notna().sum()

    if valid_count > 0:
        percentage = (matches / valid_count) * 100
    else:
        percentage = 0

    # Extract a clean base name for display
    base_name = gt_col.replace('GT.', '')

    data_list.append({
        'GT': base_name,
        'Match %': round(percentage, 1)
    })

# 4. Prepare data
match_df = pd.DataFrame(data_list)
# Clean up labels: replace underscores with spaces and capitalize
match_df['GT'] = match_df['GT'].str.replace('_', ' ').str.title()
match_df = match_df.sort_values('Match %', ascending=False)

# 5. Create a "beautiful" table using a seaborn heatmap
def create_luxury_table(df, output_file="edss_agreement.png"):
    # Set the aesthetic style
    sns.set_theme(style="white", font="sans-serif")

    # Prepare data for the heatmap
    plot_data = df.set_index('GT')[['Match %']]

    # Initialize the figure; height is dynamic based on the number of rows
    fig, ax = plt.subplots(figsize=(8, len(df) * 0.6))

    # Create a custom diverging color map (deep red -> mustard -> emerald);
    # this looks more professional than the standard 'RdYlGn'
    cmap = sns.diverging_palette(15, 135, s=80, l=55, as_cmap=True)

    # Draw the heatmap
    sns.heatmap(
        plot_data,
        annot=True,
        fmt=".1f",
        cmap=cmap,
        center=85,          # centers the color transition
        vmin=50, vmax=100,  # range of the gradient
        linewidths=2,
        linecolor='white',
        cbar=False,         # remove the color bar for a "table" look
        annot_kws={"size": 14, "weight": "bold", "family": "sans-serif"}
    )

    # Style the axes (turning the heatmap into a table)
    ax.set_xlabel("")
    ax.set_ylabel("")
    ax.xaxis.tick_top()  # move the "Match %" label to the top
    ax.set_xticklabels(['Agreement (%)'], fontsize=14, fontweight='bold', color='#2c3e50')
    ax.tick_params(axis='y', labelsize=12, labelcolor='#2c3e50', length=0)

    # Add a thin border around the plot
    for _, spine in ax.spines.items():
        spine.set_visible(True)
        spine.set_color('#ecf0f1')

    plt.title('EDSS Subcategory Consistency Analysis', fontsize=16, pad=40, fontweight='bold', color='#2c3e50')

    # Add a subtle footer
    plt.figtext(0.5, 0.0, "Tolerance: ±0.5 points",
                wrap=True, horizontalalignment='center', fontsize=10, color='gray', style='italic')

    # Save with high resolution
    plt.tight_layout()
    plt.savefig(output_file, dpi=300, bbox_inches='tight')
    print(f"Beautiful table saved as {output_file}")

# Execute
create_luxury_table(match_df)

# 6. Save as SVG
plt.savefig("agreement_table.svg", format='svg', dpi=300, bbox_inches='tight')
print("Successfully saved agreement_table.svg")

# Show the plot if running in a GUI environment
plt.show()

##

Data/style2.py (135 changes)
@@ -1,135 +0,0 @@
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import dataframe_image as dfi

# Load data
df = pd.read_csv("/home/shahin/Lab/Doktorarbeit/Barcelona/Data/Join_edssandsub.tsv", sep='\t')

# 1. Identify all GT and result columns
gt_columns = [col for col in df.columns if col.startswith('GT.')]
result_columns = [col for col in df.columns if col.startswith('result.')]

print("GT Columns found:", gt_columns)
print("Result Columns found:", result_columns)

# 2. Create a proper mapping between GT and result columns,
# handling various naming conventions (spaces, underscores, etc.)
column_mapping = {}

for gt_col in gt_columns:
    base_name = gt_col.replace('GT.', '')

    # Try different matching approaches
    candidates = [
        f'result.{base_name}',                    # exact match
        f'result.{base_name.replace(" ", "_")}',  # with underscores
        f'result.{base_name.replace("_", " ")}',  # with spaces
        f'result.{base_name.replace(" ", "")}',   # no spaces
        f'result.{base_name.replace("_", "")}'    # no underscores
    ]

    # Also try case-insensitive matching
    candidates.append(f'result.{base_name.lower()}')
    candidates.append(f'result.{base_name.upper()}')

    # Try to find a matching result column
    matched = False
    for candidate in candidates:
        if candidate in result_columns:
            column_mapping[gt_col] = candidate
            matched = True
            break

    # If no exact match was found, try partial matching
    if not matched:
        # Match by removing special characters and comparing
        base_clean = ''.join(e for e in base_name if e.isalnum() or e in ['_', ' '])
        for result_col in result_columns:
            result_base = result_col.replace('result.', '')
            result_clean = ''.join(e for e in result_base if e.isalnum() or e in ['_', ' '])
            if base_clean.lower() == result_clean.lower():
                column_mapping[gt_col] = result_col
                matched = True
                break

print("Column mapping:", column_mapping)

# 3. Faster, vectorized computation using the corrected mapping
data_list = []

for gt_col, result_col in column_mapping.items():
    print(f"Processing {gt_col} vs {result_col}")

    # Convert to numeric, forcing errors to NaN
    s1 = pd.to_numeric(df[gt_col], errors='coerce').astype(float)
    s2 = pd.to_numeric(df[result_col], errors='coerce').astype(float)

    # Calculate matches (absolute difference <= 0.5)
    diff = np.abs(s1 - s2)
    matches = (diff <= 0.5).sum()

    # Determine the denominator (total valid comparisons)
    valid_count = diff.notna().sum()

    if valid_count > 0:
        percentage = (matches / valid_count) * 100
    else:
        percentage = 0

    # Extract a clean base name for display
    base_name = gt_col.replace('GT.', '')

    data_list.append({
        'GT': base_name,
        'Match %': round(percentage, 1)
    })

# 4. Prepare data for plotting
match_df = pd.DataFrame(data_list)
match_df = match_df.sort_values('Match %', ascending=False)  # sort for better visual flow

# 5. Create the styled gradient table
def style_agreement_table(df):
    return (df.style
            .format({'Match %': '{:.1f}%'})  # add % sign
            .background_gradient(cmap='RdYlGn', subset=['Match %'], vmin=50, vmax=100)  # red-to-green gradient
            .set_properties(**{
                'text-align': 'center',
                'font-size': '12pt',
                'border-collapse': 'collapse',
                'border': '1px solid #D3D3D3'
            })
            .set_table_styles([
                # Style the header
                {'selector': 'th', 'props': [
                    ('background-color', '#404040'),
                    ('color', 'white'),
                    ('font-weight', 'bold'),
                    ('text-transform', 'uppercase'),
                    ('padding', '10px')
                ]},
                # Add a hover effect
                {'selector': 'tr:hover', 'props': [('background-color', '#f5f5f5')]}
            ])
            .set_caption("EDSS Agreement Analysis: Ground Truth vs. Results (Tolerance ±0.5)")
            )

# To display in a Jupyter notebook:
styled_table = style_agreement_table(match_df)
styled_table

dfi.export(styled_table, "styled_table.png")
#styled_table.to_html("agreement_report.html")

# 6. Save as SVG
#plt.savefig("agreement_table.svg", format='svg', dpi=300, bbox_inches='tight')
#print("Successfully saved agreement_table.svg")

# Show the plot if running in a GUI environment
plt.show()
@@ -1,74 +0,0 @@
import pandas as pd
import numpy as np
import seaborn as sns

# Sample data (replace with your actual df)
df = pd.read_csv("/home/shahin/Lab/Doktorarbeit/Barcelona/Data/Join_edssandsub.tsv", sep='\t')

# Identify GT and result columns
gt_columns = [col for col in df.columns if col.startswith('GT.')]
result_columns = [col for col in df.columns if col.startswith('result.')]

# Create the mapping
column_mapping = {}
for gt_col in gt_columns:
    base_name = gt_col.replace('GT.', '')
    result_col = f'result.{base_name}'
    if result_col in result_columns:
        column_mapping[gt_col] = result_col

# Function to compute the match percentage for each GT-result pair
def compute_match_percentages(df, column_mapping):
    percentages = []
    for gt_col, result_col in column_mapping.items():
        count = 0
        total = len(df)

        for _, row in df.iterrows():
            gt_val = row[gt_col]
            result_val = row[result_col]

            # Handle NaN values
            if pd.isna(gt_val) or pd.isna(result_val):
                continue

            # Handle non-numeric values
            try:
                gt_float = float(gt_val)
                result_float = float(result_val)
            except (ValueError, TypeError):
                # Skip rows with non-numeric values
                continue

            # Check if the values are within the 0.5 tolerance
            if abs(gt_float - result_float) <= 0.5:
                count += 1

        percentage = (count / total) * 100
        percentages.append({
            'GT_Column': gt_col,
            'Result_Column': result_col,
            'Match_Percentage': round(percentage, 1)
        })

    return pd.DataFrame(percentages)

# Compute match percentages
match_df = compute_match_percentages(df, column_mapping)

# Create a pivot table for gradient display (optional but helpful)
pivot_table = match_df.set_index(['GT_Column', 'Result_Column'])['Match_Percentage'].unstack(fill_value=0)

# Apply a gradient background
cm = sns.light_palette("green", as_cmap=True)
styled_table = pivot_table.style.background_gradient(cmap=cm, axis=None)

# Display the result
print("Agreement Percentage Table (with gradient):")
styled_table

# Save the styled table to a file
styled_table.to_html("agreement_report.html")
print("Report saved to agreement_report.html")
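
Editor's note: compute_match_percentages above walks every row with iterrows, which is slow on larger tables. A minimal vectorized sketch that should yield the same numbers (same df and column_mapping; NaN and non-numeric pairs count against the total, as in the original):

def compute_match_percentages_fast(df, column_mapping, tol=0.5):
    rows = []
    for gt_col, result_col in column_mapping.items():
        # Non-numeric entries become NaN; comparisons with NaN are False below
        s1 = pd.to_numeric(df[gt_col], errors='coerce')
        s2 = pd.to_numeric(df[result_col], errors='coerce')
        pct = ((s1 - s2).abs() <= tol).sum() / len(df) * 100
        rows.append({'GT_Column': gt_col,
                     'Result_Column': result_col,
                     'Match_Percentage': round(pct, 1)})
    return pd.DataFrame(rows)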

app.py (5 changes)
@@ -214,8 +214,3 @@ if __name__ == "__main__":
     print(f"Results saved to {output_json}")
 ##
 
-
-
-# %% name
-eXXXXXXXX
-##

total_app.py (149 changes)
@@ -1,149 +0,0 @@
import time
import json
import os
from datetime import datetime
import pandas as pd
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# === CONFIGURATION ===
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL")
MODEL_NAME = "GPT-OSS-120B"
HEALTH_URL = f"{OPENAI_BASE_URL}/health"  # Placeholder - an actual health check would need to be implemented
CHAT_URL = f"{OPENAI_BASE_URL}/chat/completions"

# File paths
INPUT_CSV = "/home/shahin/Lab/Doktorarbeit/Barcelona/Data/MS_Briefe_400_with_unique_id_SHA3_explore_cleaned_unique.csv"
EDSS_INSTRUCTIONS_PATH = "/home/shahin/Lab/Doktorarbeit/Barcelona/attach/Komplett.txt"
#GRAMMAR_FILE = "/home/shahin/Lab/Doktorarbeit/Barcelona/attach/just_edss_schema.gbnf"

# Initialize OpenAI client
client = OpenAI(
    api_key=OPENAI_API_KEY,
    base_url=OPENAI_BASE_URL
)

# Read EDSS instructions from file
with open(EDSS_INSTRUCTIONS_PATH, 'r') as f:
    EDSS_INSTRUCTIONS = f.read().strip()

# === RUN INFERENCE 2 ===
def run_inference(patient_text, max_retries=3):
    prompt = f'''Du bist ein medizinischer Assistent, der darauf spezialisiert ist, EDSS-Scores (Expanded Disability Status Scale) sowie alle Unterkategorien aus klinischen Berichten zu extrahieren.

### Regeln für die Ausgabe:
1. **Reason**: Erstelle eine prägnante Zusammenfassung (max. 400 Zeichen) der Befunde auf **DEUTSCH**, die zur Einstufung führen.
2. **klassifizierbar**:
   - Setze dies auf **true**, wenn ein EDSS-Wert identifiziert, berechnet oder basierend auf den klinischen Hinweisen plausibel geschätzt werden kann.
   - Setze dies auf **false**, NUR wenn die Daten absolut unzureichend oder so widersprüchlich sind, dass keinerlei Einstufung möglich ist.
3. **EDSS**:
   - Dieses Feld ist **VERPFLICHTEND**, wenn "klassifizierbar" auf true steht.
   - Es muss eine Zahl zwischen 0.0 und 10.0 sein.
   - Versuche stets, den EDSS-Wert so präzise wie möglich zu bestimmen, auch wenn die Datenlage dünn ist (nutze verfügbare Informationen zu Gehstrecke und Funktionssystemen).
   - Dieses Feld **DARF NICHT ERSCHEINEN**, wenn "klassifizierbar" auf false steht.
4. **Unterkategorien**:
   - Extrahiere alle folgenden Unterkategorien aus dem Bericht:
     - VISUAL OPTIC FUNCTIONS (max. 6.0)
     - BRAINSTEM FUNCTIONS (max. 6.0)
     - PYRAMIDAL FUNCTIONS (max. 6.0)
     - CEREBELLAR FUNCTIONS (max. 6.0)
     - SENSORY FUNCTIONS (max. 6.0)
     - BOWEL AND BLADDER FUNCTIONS (max. 6.0)
     - CEREBRAL FUNCTIONS (max. 6.0)
     - AMBULATION (max. 10.0)
   - Jede Unterkategorie sollte eine Zahl zwischen 0.0 und der jeweiligen Obergrenze enthalten, wenn sie klassifizierbar ist.
   - Wenn eine Unterkategorie nicht klassifizierbar ist, setze den Wert auf null.

### Einschränkungen:
- Erfinde keine Fakten, aber nutze klinische Herleitungen aus dem Bericht, um den EDSS und die Unterkategorien zu bestimmen.
- Priorisiere die Vergabe eines EDSS-Wertes gegenüber der Markierung als nicht klassifizierbar.
- Halte dich strikt an die JSON-Struktur.
- Die Unterkategorien müssen immer enthalten sein, auch wenn sie null sind.

EDSS-Bewertungsrichtlinien:
{EDSS_INSTRUCTIONS}

Patientenbericht:
{patient_text}
'''

    start_time = time.time()
    for attempt in range(max_retries + 1):
        try:
            response = client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "You extract EDSS scores and all subcategories. You prioritize providing values even if data is partial, by using clinical inference."
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                model=MODEL_NAME,
                max_tokens=2048,
                temperature=0.0,
                response_format={"type": "json_object"}
            )
            content = response.choices[0].message.content

            if content is None or content.strip() == "":
                raise ValueError("API returned empty or None response content")

            parsed = json.loads(content)
            inference_time = time.time() - start_time
            return {
                "success": True,
                "result": parsed,
                "inference_time_sec": inference_time
            }

        except Exception as e:
            print(f"Attempt {attempt + 1} failed: {e}")
            if attempt < max_retries:
                time.sleep(2 ** attempt)  # exponential backoff
                continue
            else:
                print("All retries exhausted.")
                return {
                    "success": False,
                    "error": str(e),
                    "inference_time_sec": -1
                }

# === BUILD PATIENT TEXT ===
def build_patient_text(row):
    # Handle potential NaN or None values in the row
    summary = str(row.get("T_Zusammenfassung", "")) if pd.notna(row.get("T_Zusammenfassung")) else ""
    diagnoses = str(row.get("Diagnosen", "")) if pd.notna(row.get("Diagnosen")) else ""
    clinical = str(row.get("T_KlinBef", "")) if pd.notna(row.get("T_KlinBef")) else ""
    findings = str(row.get("T_Befunde", "")) if pd.notna(row.get("T_Befunde")) else ""
    return "\n".join([summary, diagnoses, clinical, findings]).strip()

if __name__ == "__main__":
    # Read the CSV file only inside the main block
    df = pd.read_csv(INPUT_CSV, sep=';')
    results = []

    # Process each row
    for idx, row in df.iterrows():
        print(f"Processing row {idx + 1}/{len(df)}")
        try:
            patient_text = build_patient_text(row)
            result = run_inference(patient_text)
            # Add unique_id and MedDatum to the result for tracking
            result["unique_id"] = row.get("unique_id", f"row_{idx}")
            result["MedDatum"] = row.get("MedDatum", None)
            results.append(result)
            print(json.dumps(result, indent=2, ensure_ascii=False))
        except Exception as e:
            print(f"Error processing row {idx}: {e}")
            results.append({
                "success": False,
                "error": str(e),
                "unique_id": row.get("unique_id", f"row_{idx}"),
                "MedDatum": row.get("MedDatum", None)
            })

    # Save results to a JSON file
    output_json = INPUT_CSV.replace(".csv", "_results_total.json")
    with open(output_json, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)
    print(f"Results saved to {output_json}")