First commit

commit 24a8e1cdf5 (2026-01-18 21:53:31 +01:00)
4 changed files with 487 additions and 0 deletions

.gitignore (new file, 6 lines)

@@ -0,0 +1,6 @@
# Ignore all contents of these directories
/Data/
/attach/
/results/
/enarcelona/
.env

README.md (new file, empty)

app.py (new file, 216 lines)

@@ -0,0 +1,216 @@
# %% API call 1
import time
import json
import os
from datetime import datetime
import pandas as pd
from openai import OpenAI
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# === CONFIGURATION ===
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL")
MODEL_NAME = "GPT-OSS-120B"
HEALTH_URL = f"{OPENAI_BASE_URL}/health" # Placeholder - actual health check would need to be implemented
CHAT_URL = f"{OPENAI_BASE_URL}/chat/completions"
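# Illustrative sketch (assumption, not part of the original pipeline): if the serving
# backend exposes a plain GET /health endpoint, readiness could be probed like this
# before starting the batch run. The endpoint path and response semantics are guesses.
def server_is_healthy(url: str = HEALTH_URL, timeout: float = 5.0) -> bool:
    import requests  # local import; only needed if this probe is actually used
    try:
        return requests.get(url, timeout=timeout).status_code == 200
    except requests.RequestException:
        return False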
# File paths
INPUT_CSV = "/home/shahin/Lab/Doktorarbeit/Barcelona/Data/MS_Briefe_400_with_unique_id_SHA3_explore_cleaned_unique.csv"
EDSS_INSTRUCTIONS_PATH = "/home/shahin/Lab/Doktorarbeit/Barcelona/attach/Komplett.txt"
#GRAMMAR_FILE = "/home/shahin/Lab/Doktorarbeit/Barcelona/attach/just_edss_schema.gbnf"
# Initialize OpenAI client
client = OpenAI(
api_key=OPENAI_API_KEY,
base_url=OPENAI_BASE_URL
)
# Read EDSS instructions from file
with open(EDSS_INSTRUCTIONS_PATH, 'r') as f:
EDSS_INSTRUCTIONS = f.read().strip()
# === RUN INFERENCE ===
#def run_inference(patient_text):
# prompt = f''' Du bist ein medizinischer Assistent, der spezialisiert darauf ist, EDSS-Scores (Expanded Disability Status Scale) aus klinischen Berichten zu extrahieren.
#
#### Regeln für die Ausgabe:
#1. **Reason**: Erstelle eine prägnante Zusammenfassung (max. 400 Zeichen) der Befunde, die zur Einstufung führen. Die Zusammenfassung muss auf **DEUTSCH** verfasst sein.
#2. **nicht_klassifizierbar**:
# - Setze dies auf **true**, NUR wenn die klinischen Daten unzureichend, widersprüchlich oder fehlend sind, um einen EDSS-Wert zu bestimmen.
# - Setze dies auf **false**, wenn ein spezifischer EDSS-Wert identifiziert oder berechnet werden kann.
#3. **EDSS**:
# - Dieses Feld ist **VERPFLICHTEND**, wenn "nicht_klassifizierbar" auf false steht.
# - Es muss eine Zahl zwischen 0.0 und 10.0 sein.
# - Dieses Feld **DARF NICHT ERSCHEINEN**, wenn "nicht_klassifizierbar" auf true steht.
#
#### Einschränkungen:
#- Erfinde keine Daten.
#- Wenn der Bericht zweideutig ist, markiere "nicht_klassifizierbar": true.
#- Halte dich strikt an die JSON-Struktur.
#
#EDSS-Bewertungsrichtlinien:
#{EDSS_INSTRUCTIONS}
#
#Patientenbericht:
#{patient_text}
#'''
#
# start_time = time.time()
#
# try:
# # Make API call using OpenAI client
# response = client.chat.completions.create(
# messages=[
# {
# "role": "system",
# "content": "You extract EDSS scores."
# },
# {
# "role": "user",
# "content": prompt
# }
# ],
# model=MODEL_NAME,
# max_tokens=2048,
# temperature=0.0,
# response_format={"type": "json_object"}
# )
#
# # Extract content from response
# content = response.choices[0].message.content
#
# # Parse the JSON response
# parsed = json.loads(content)
#
# inference_time = time.time() - start_time
#
# return {
# "success": True,
# "result": parsed,
# "inference_time_sec": inference_time
# }
#
# except Exception as e:
# print(f"Inference error: {e}")
# return {
# "success": False,
# "error": str(e),
# "inference_time_sec": -1
# }
#
# === RUN INFERENCE 2 ===
def run_inference(patient_text):
prompt = f''' Du bist ein medizinischer Assistent, der spezialisiert darauf ist, EDSS-Scores (Expanded Disability Status Scale) aus klinischen Berichten zu extrahieren.
### Regeln für die Ausgabe:
1. **Reason**: Erstelle eine prägnante Zusammenfassung (max. 400 Zeichen) der Befunde auf **DEUTSCH**, die zur Einstufung führen.
2. **klassifizierbar**:
- Setze dies auf **true**, wenn ein EDSS-Wert identifiziert, berechnet oder basierend auf den klinischen Hinweisen plausibel geschätzt werden kann.
- Setze dies auf **false**, NUR wenn die Daten absolut unzureichend oder so widersprüchlich sind, dass keinerlei Einstufung möglich ist.
3. **EDSS**:
- Dieses Feld ist **VERPFLICHTEND**, wenn "klassifizierbar" auf true steht.
- Es muss eine Zahl zwischen 0.0 und 10.0 sein.
- Versuche stets, den EDSS-Wert so präzise wie möglich zu bestimmen, auch wenn die Datenlage dünn ist (nutze verfügbare Informationen zu Gehstrecke und Funktionssystemen).
- Dieses Feld **DARF NICHT ERSCHEINEN**, wenn "klassifizierbar" auf false steht.
### Einschränkungen:
- Erfinde keine Fakten, aber nutze klinische Herleitungen aus dem Bericht, um den EDSS zu bestimmen.
- Priorisiere die Vergabe eines EDSS-Wertes gegenüber der Markierung als nicht klassifizierbar.
- Halte dich strikt an die JSON-Struktur.
EDSS-Bewertungsrichtlinien:
{EDSS_INSTRUCTIONS}
Patientenbericht:
{patient_text}
'''
start_time = time.time()
try:
# Make API call using OpenAI client
response = client.chat.completions.create(
messages=[
{
"role": "system",
"content": "You extract EDSS scores. You prioritize providing a score even if data is partial, by using clinical inference."
},
{
"role": "user",
"content": prompt
}
],
model=MODEL_NAME,
max_tokens=2048,
temperature=0.0,
response_format={"type": "json_object"}
)
# Extract content from response
content = response.choices[0].message.content
# Parse the JSON response
parsed = json.loads(content)
inference_time = time.time() - start_time
return {
"success": True,
"result": parsed,
"inference_time_sec": inference_time
}
except Exception as e:
print(f"Inference error: {e}")
return {
"success": False,
"error": str(e),
"inference_time_sec": -1
}
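# Illustrative shape of a successful return value (the keys inside "result" follow the
# prompt above; the exact JSON the model emits may vary):
# {
#     "success": True,
#     "result": {"Reason": "...", "klassifizierbar": True, "EDSS": 4.5},
#     "inference_time_sec": 12.3
# }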
# === BUILD PATIENT TEXT ===
def build_patient_text(row):
return (
str(row["T_Zusammenfassung"]) + "\n" +
str(row["Diagnosen"]) + "\n" +
str(row["T_KlinBef"]) + "\n" +
str(row["T_Befunde"]) + "\n"
)
if __name__ == "__main__":
# Read CSV file ONLY inside main block
df = pd.read_csv(INPUT_CSV, sep=';')
results = []
# Process each row
for idx, row in df.iterrows():
print(f"Processing row {idx + 1}/{len(df)}")
try:
patient_text = build_patient_text(row)
result = run_inference(patient_text)
# Add unique_id and MedDatum to result for tracking
result["unique_id"] = row.get("unique_id", f"row_{idx}")
result["MedDatum"] = row.get("MedDatum", None)
results.append(result)
print(json.dumps(result, indent=2))
except Exception as e:
print(f"Error processing row {idx}: {e}")
results.append({
"success": False,
"error": str(e),
"unique_id": row.get("unique_id", f"row_{idx}"),
"MedDatum": row.get("MedDatum", None)
})
# Save results to a JSON file
output_json = INPUT_CSV.replace(".csv", "_results2.json")
with open(output_json, 'w') as f:
json.dump(results, f, indent=2)
print(f"Results saved to {output_json}")
##

figure1.py (new file, 265 lines)

@@ -0,0 +1,265 @@
# %% Explore
import pandas as pd
# Load the dataset
file_path = '/home/shahin/Lab/Doktorarbeit/Barcelona/Data/MS_Briefe_400_with_unique_id_SHA3_explore_cleaned_unique.csv'
df = pd.read_csv(file_path, sep=';')
# Show basic information about the dataset
print("Dataset shape:", df.shape)
print("\nColumn names:")
for col in df.columns:
print(f" {col}")
# Count unique patients
unique_patients = df['unique_id'].nunique()
print(f"\nNumber of unique patients: {unique_patients}")
# Count how many times each patient appears
patient_counts = df['unique_id'].value_counts()
# Calculate average appearances per patient
average_appearances = patient_counts.mean()
# Show some statistics
print(f"\nAverage number of times each patient appeared: {average_appearances:.2f}")
print(f"\nMinimum appearances for any patient: {patient_counts.min()}")
print(f"Maximum appearances for any patient: {patient_counts.max()}")
# Show the first few patient counts
print("\nFirst 10 patients and their appearance counts:")
print(patient_counts.head(10))
# Show how many patients appear exactly once
single_occurrence = (patient_counts == 1).sum()
print(f"\nNumber of patients who appeared exactly once: {single_occurrence}")
# Show how many patients appear more than once
multiple_occurrence = (patient_counts > 1).sum()
print(f"Number of patients who appeared more than once: {multiple_occurrence}")
##
# %% EDSS Dist
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Reload the dataset (same file as in the exploration cell above)
df = pd.read_csv('/home/shahin/Lab/Doktorarbeit/Barcelona/Data/MS_Briefe_400_with_unique_id_SHA3_explore_cleaned_unique.csv', sep=';')
# Create EDSS categories function with better error handling
def categorize_edss(edss_value):
# Handle NaN and empty values
if pd.isna(edss_value) or edss_value == '' or edss_value is None:
return 'No EDSS'
# Convert to string and replace comma with dot
edss_str = str(edss_value).strip()
# Handle special cases
if edss_str.lower() in ['unverändert', 'unchanged', 'n/a', 'na', 'none', 'null', '']:
return 'Invalid'
try:
# Replace comma with dot for decimal numbers
edss_str = edss_str.replace(',', '.')
# Try to convert to float
edss_float = float(edss_str)
# Categorize based on value
if 0 <= edss_float <= 1:
return '0-1'
elif 1 < edss_float <= 2:
return '1-2'
elif 2 < edss_float <= 3:
return '2-3'
elif 3 < edss_float <= 4:
return '3-4'
elif 4 < edss_float <= 5:
return '4-5'
elif 5 < edss_float <= 6:
return '5-6'
elif 6 < edss_float <= 7:
return '6-7'
elif 7 < edss_float <= 8:
return '7-8'
elif 8 < edss_float <= 9:
return '8-9'
elif 9 < edss_float <= 10:
return '9-10'
else:
return 'Invalid'
except (ValueError, TypeError):
return 'Invalid'
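# Quick sanity check (illustrative): categorize_edss("3,5") -> '3-4', categorize_edss(None) -> 'No EDSS'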
# Apply categorization
df['EDSS_Category'] = df['EDSS'].apply(categorize_edss)
# Count patients in each category
edss_counts = df['EDSS_Category'].value_counts().sort_index()
# Print the counts for reference
print("Patient counts by EDSS category:")
print(edss_counts)
# Create the bar chart
plt.figure(figsize=(12, 6))
bars = plt.bar(edss_counts.index, edss_counts.values, color='skyblue', edgecolor='navy', alpha=0.7)
# Add value labels on top of bars
for bar in bars:
height = bar.get_height()
plt.text(bar.get_x() + bar.get_width()/2., height,
f'{int(height)}',
ha='center', va='bottom')
plt.xlabel('EDSS Score Categories')
plt.ylabel('Number of Cases')
plt.title('Distribution of Patients by EDSS Score Categories')
plt.xticks(rotation=45)
plt.grid(axis='y', alpha=0.3)
plt.tight_layout()
plt.show()
##
# %% Pie Chart
import matplotlib.pyplot as plt
import pandas as pd
# Visit-frequency data: number of patients by visit count
data = {
'Visit': [9, 8, 7, 6, 5, 4, 3, 2, 1],
'patient_count': [2, 3, 3, 6, 13, 17, 28, 24, 32]
}
df = pd.DataFrame(data)
# Create pie chart
plt.figure(figsize=(10, 8))
# Define colors for better visualization
colors = ['#ff9999', '#66b3ff', '#99ff99', '#ffcc99', '#ff99cc',
'#c2c2f0', '#ffb3b3', '#99ffcc', '#ffccff']
# Create pie chart with custom labels showing both count and percentage
labels = [f'{visit} Visits \n{count} patients\n({count/128*100:.1f}%)'
for visit, count in zip(df['Visit'], df['patient_count'])]
wedges, texts, autotexts = plt.pie(df['patient_count'],
labels=labels,
colors=colors,
autopct='%1.1f%%',
startangle=90,
explode=[0.05] * len(df)) # Slightly separate slices
# Add title
plt.title('Patient Visit Frequency Distribution\nTotal Patients: 128\nTotal Cases: 396', fontsize=16, pad=20)
# Ensure pie chart is circular
plt.axis('equal')
# Adjust layout to prevent legend cutoff
plt.tight_layout()
# Save as SVG (this will create a high-quality vector graphic)
plt.savefig('patient_visit_frequency.svg', format='svg', bbox_inches='tight', dpi=300)
# Also save as PNG for reference
plt.savefig('patient_visit_frequency.png', format='png', bbox_inches='tight', dpi=300)
# Show the chart
plt.show()
# Print summary statistics
print(f"Total patients: {sum(df['patient_count'])}")
print("\nVisit frequency breakdown:")
for visit, count in zip(df['Visit'], df['patient_count']):
percentage = (count / 128) * 100
print(f"{visit} Visits : {count} patients ({percentage:.1f}%)")
print("\nFiles created:")
print("- patient_visit_frequency.svg (SVG format)")
print("- patient_visit_frequency.png (PNG format)")
##
# %% Slope Chart
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# 1. Data Prep (cleaning and filtering for patients with >1 visit)
# Reload the full dataset: the pie-chart cell above reassigned `df` to the visit-count table.
df = pd.read_csv(file_path, sep=';')
df['EDSS'] = pd.to_numeric(df['EDSS'].astype(str).str.replace(',', '.'), errors='coerce')
df = df.dropna(subset=['unique_id', 'EDSS', 'MedDatum']).sort_values(['unique_id', 'MedDatum'])
# Extract first and last
first_last = df.groupby('unique_id').apply(lambda x: x.iloc[[0, -1]] if len(x) > 1 else None).reset_index(drop=True)
first_last['Visit_Type'] = first_last.groupby('unique_id').cumcount().map({0: 'First Visit', 1: 'Last Visit'})
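# At this point `first_last` holds exactly two rows per patient with more than one visit;
# cumcount() numbers them 0 and 1 within each patient, mapped to the two visit labels.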
# 2. Set the style
sns.set_style("white")
plt.figure(figsize=(10, 8))
# Define sophisticated colors
color_worsened = "#E67E22" # Muted Orange
color_improved = "#2ECC71" # Muted Green
color_stable = "#BDC3C7" # Soft Grey
avg_color = "#2C3E50" # Deep Navy
# 3. Plotting individual lines
for pid in first_last['unique_id'].unique():
p_data = first_last[first_last['unique_id'] == pid]
start, end = p_data.iloc[0]['EDSS'], p_data.iloc[1]['EDSS']
# Logic for color and linewidth
if end > start:
color, alpha, lw = color_worsened, 0.3, 1.2
elif end < start:
color, alpha, lw = color_improved, 0.3, 1.2
else:
color, alpha, lw = color_stable, 0.15, 0.8
plt.plot(p_data['Visit_Type'], p_data['EDSS'],
color=color, alpha=alpha, linewidth=lw, marker='o', markersize=4, markerfacecolor='white')
# 4. Add Background Distribution (Violin Plot)
sns.violinplot(x='Visit_Type', y='EDSS', data=first_last,
inner=None, color=".95", linewidth=0)
# 5. Add the Population Mean (The "Hero" line)
summary = first_last.groupby('Visit_Type')['EDSS'].mean().reindex(['First Visit', 'Last Visit'])
plt.plot(summary.index, summary.values, color=avg_color, linewidth=4,
marker='s', markersize=10, label='Average Population Trend', zorder=10)
# 6. Annotate the Average values
for i, val in enumerate(summary.values):
plt.text(i, val + 0.2, f'{val:.2f}', color=avg_color, fontweight='bold', ha='center')
# Aesthetics
plt.title('Evolution of EDSS Scores: First vs. Last Clinical Visit', fontsize=16, pad=20, fontweight='bold')
plt.ylabel('EDSS Score', fontsize=12)
plt.xlabel('')
plt.xticks(fontsize=12, fontweight='bold')
plt.yticks(np.arange(0, 10.5, 1)) # Typical EDSS scale
plt.ylim(-0.5, 10)
# Remove chart junk
sns.despine(left=True, bottom=True)
plt.grid(axis='y', color='gray', linestyle='--', alpha=0.2)
plt.legend(frameon=False, loc='upper center', bbox_to_anchor=(0.5, -0.05))
plt.tight_layout()
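# Optional (sketch): to persist this figure like the pie chart above, call plt.savefig(...)
# here, i.e. before plt.show(); the filename would be a new, project-specific choice.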
plt.show()
##