diff --git a/.gitignore b/.gitignore
index 685e910..b979619 100644
--- a/.gitignore
+++ b/.gitignore
@@ -90,3 +90,4 @@ latex/*.synctex.gz
 # logs
 # ======================
 var/
+licenses/gurobi.lic
diff --git a/src/optimization/__pycache__/model_builder.cpython-313.pyc b/src/optimization/__pycache__/model_builder.cpython-313.pyc
index 4a9a064..b1842a4 100644
Binary files a/src/optimization/__pycache__/model_builder.cpython-313.pyc and b/src/optimization/__pycache__/model_builder.cpython-313.pyc differ
diff --git a/src/optimization/__pycache__/run_optimization.cpython-313.pyc b/src/optimization/__pycache__/run_optimization.cpython-313.pyc
index 84d084e..1cc4466 100644
Binary files a/src/optimization/__pycache__/run_optimization.cpython-313.pyc and b/src/optimization/__pycache__/run_optimization.cpython-313.pyc differ
diff --git a/src/optimization/model_builder.py b/src/optimization/model_builder.py
index 4d3434b..305b19f 100644
--- a/src/optimization/model_builder.py
+++ b/src/optimization/model_builder.py
@@ -1436,6 +1436,7 @@ def solve_model(
     time_limit: int = 600,
     mip_gap: float = 0.05,
     iis_path: Path = Path("results/iis.ilp"),
+    use_warmstart: bool = False,
 ) -> pyo.SolverResults:
     if solver_name == "highs":
         solver = pyo.SolverFactory("highs")
@@ -1447,7 +1448,21 @@ def solve_model(
                 "output_flag": True,
             }
         )
-        return solver.solve(model, tee=True)
+        if hasattr(solver, "config"):
+            solver.config.load_solutions = False
+            solver.config.raise_exception_on_nonoptimal_result = False
+        solve_kwargs = {"tee": True}
+        if use_warmstart and hasattr(solver, "warm_start_capable"):
+            try:
+                if solver.warm_start_capable():
+                    solve_kwargs["warmstart"] = True
+            except Exception:
+                pass
+        results = solver.solve(model, **solve_kwargs)
+        solution_status = str(getattr(results, "solution_status", "")).lower()
+        if solution_status in {"feasible", "optimal"} and hasattr(results, "solution_loader"):
+            results.solution_loader.load_vars()
+        return results
 
     opt = pyo.SolverFactory(solver_name)
     if solver_name == "gurobi":
@@ -1458,7 +1473,14 @@ def solve_model(
         # SCIP uses hierarchical parameter names.
        opt.options["limits/time"] = time_limit
        opt.options["limits/gap"] = mip_gap
-    results = opt.solve(model, tee=True, symbolic_solver_labels=True)
+    solve_kwargs = {"tee": True, "symbolic_solver_labels": True}
+    if use_warmstart and hasattr(opt, "warm_start_capable"):
+        try:
+            if opt.warm_start_capable():
+                solve_kwargs["warmstart"] = True
+        except Exception:
+            pass
+    results = opt.solve(model, **solve_kwargs)
 
     if solver_name == "gurobi" and results.solver.termination_condition in {
         TerminationCondition.infeasible,
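
Note: solve_model now gates the warmstart on Pyomo's warm_start_capable() check, so passing use_warmstart=True is safe even for solver interfaces that ignore MIP starts. A minimal sketch of the intended two-pass usage, assuming src/ is on sys.path and an illustrative data directory (neither is specified by this patch):

    from pathlib import Path

    from optimization.model_builder import build_model, load_tables, solve_model

    tables = load_tables(Path("data/processed"))  # illustrative path
    model = build_model(tables, step_size_tonnes=1000)

    # Pass 1: coarse gap to find a feasible incumbent quickly.
    solve_model(model, "highs", time_limit=120, mip_gap=0.30)

    # Pass 2: the incumbent values are still loaded on the model's
    # variables, so the solver may reuse them as a MIP start.
    solve_model(model, "highs", time_limit=600, mip_gap=0.05, use_warmstart=True)
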
diff --git a/src/optimization/run_optimization.py b/src/optimization/run_optimization.py
index d38c601..297e985 100644
--- a/src/optimization/run_optimization.py
+++ b/src/optimization/run_optimization.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import argparse
+import json
 import sys
 from pathlib import Path
@@ -15,6 +16,109 @@
 if str(SRC_ROOT) not in sys.path:
 
 from optimization.model_builder import build_model, load_tables, solve_model
 
 
+def _safe_value(component) -> float:
+    val = pyo.value(component, exception=False)
+    return float(val) if val is not None else 0.0
+
+
+def export_warmstart(model: pyo.ConcreteModel, output_path: Path) -> None:
+    output_path.parent.mkdir(parents=True, exist_ok=True)
+
+    payload: dict[str, object] = {
+        "schema_version": 1,
+        "step_size_tonnes": _safe_value(model.step_size_tonnes),
+        "variables": {},
+    }
+
+    variables: dict[str, list[dict[str, object]]] = {}
+
+    k_rows = []
+    for i in model.I:
+        for j in model.J:
+            for w in model.W:
+                for d in model.D:
+                    for s in model.S:
+                        value = _safe_value(model.k[i, j, w, d, s])
+                        if abs(value) <= 1e-9:
+                            continue
+                        k_rows.append({"i": i, "j": j, "w": int(w), "d": d, "s": s, "value": round(value, 8)})
+    variables["k"] = k_rows
+
+    if hasattr(model, "bunker"):
+        bunker_rows = []
+        for i in model.I:
+            for j in getattr(model, "J_BUNKER", []):
+                for w in model.W:
+                    for d in model.D:
+                        value = _safe_value(model.bunker[i, j, w, d])
+                        if abs(value) <= 1e-9:
+                            continue
+                        bunker_rows.append({"i": i, "j": j, "w": int(w), "d": d, "value": round(value, 8)})
+        variables["bunker"] = bunker_rows
+
+    if hasattr(model, "bunker_out"):
+        bunker_out_rows = []
+        for i in model.I:
+            for j in getattr(model, "J_BUNKER", []):
+                for w in model.W:
+                    for d in model.D:
+                        for s in model.S:
+                            value = _safe_value(model.bunker_out[i, j, w, d, s])
+                            if abs(value) <= 1e-9:
+                                continue
+                            bunker_out_rows.append(
+                                {"i": i, "j": j, "w": int(w), "d": d, "s": s, "value": round(value, 8)}
+                            )
+        variables["bunker_out"] = bunker_out_rows
+
+    payload["variables"] = variables
+    output_path.write_text(json.dumps(payload, indent=2, ensure_ascii=True), encoding="utf-8")
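
Note: export_warmstart writes a JSON snapshot shaped like the following. The index values are placeholders (the real i/j/d/s labels come from the model sets); near-zero entries are dropped on export:

    {
      "schema_version": 1,
      "step_size_tonnes": 1000.0,
      "variables": {
        "k": [
          {"i": "MINE_A", "j": "PLANT_J", "w": 12, "d": "2025-03-24", "s": "S1", "value": 3.0}
        ],
        "bunker": [
          {"i": "MINE_A", "j": "BUNKER_1", "w": 12, "d": "2025-03-24", "value": 1500.0}
        ],
        "bunker_out": []
      }
    }
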
raise ValueError(f"Warmstart-Datei ist ungueltig: Eintrag in '{component_name}' ist kein Objekt") + try: + index = tuple(row[key] for key in keys) + except KeyError as exc: + raise ValueError( + f"Warmstart-Datei ist ungueltig: Schluessel {exc.args[0]!r} fehlt in '{component_name}'" + ) from exc + if len(index) >= 3: + index = tuple(int(part) if keys[pos] == "w" else part for pos, part in enumerate(index)) + if index not in component: + continue + value = float(row.get("value", 0.0)) + component[index].set_value(value) + + set_rows("k", model.k, ("i", "j", "w", "d", "s")) + if hasattr(model, "bunker"): + set_rows("bunker", model.bunker, ("i", "j", "w", "d")) + if hasattr(model, "bunker_out"): + set_rows("bunker_out", model.bunker_out, ("i", "j", "w", "d", "s")) + + def report_results(model: pyo.ConcreteModel, max_rows: int) -> None: @@ -714,8 +818,8 @@ def main() -> None: parser.add_argument( "--mip-gap", type=float, - default=0.03, - help="MIP gap tolerance (default: 0.03).", + default=0.30, + help="MIP gap tolerance (default: 0.30).", ) parser.add_argument( "--step-size-tonnes", @@ -724,14 +828,36 @@ def main() -> None: choices=[960, 1000], help="Discrete train step size in tonnes (default: 1000).", ) + parser.add_argument( + "--warmstart-in", + type=Path, + default=None, + help="Optional JSON solution snapshot to use as warmstart.", + ) + parser.add_argument( + "--warmstart-out", + type=Path, + default=None, + help="Optional JSON path for exporting a reusable warmstart snapshot.", + ) args = parser.parse_args() tables = load_tables(args.data_dir) model = build_model(tables, step_size_tonnes=args.step_size_tonnes) + if args.warmstart_in is not None: + load_warmstart(model, args.warmstart_in) - solve_model(model, args.solver, args.time_limit, args.mip_gap) + solve_model( + model, + args.solver, + args.time_limit, + args.mip_gap, + use_warmstart=args.warmstart_in is not None, + ) # report_results(model, args.max_rows) export_results(model, args.output_xlsx) + if args.warmstart_out is not None: + export_warmstart(model, args.warmstart_out) if __name__ == "__main__": diff --git a/webapp/backend/main.py b/webapp/backend/main.py index 9231367..4b039a7 100644 --- a/webapp/backend/main.py +++ b/webapp/backend/main.py @@ -105,12 +105,21 @@ def _run_step( _clear_active_process(job_id) if result_code != 0: - raise RuntimeError(f"Command failed: {' '.join(cmd)}") + tail = "" + try: + lines = log_path.read_text(encoding="utf-8", errors="replace").splitlines() + tail_lines = lines[-40:] + if tail_lines: + tail = "\n" + "\n".join(tail_lines) + except Exception: + pass + raise RuntimeError(f"Command failed: {' '.join(cmd)}{tail}") def _run_job( job_dir: Path, input_path: Path, + warmstart_input_path: Path | None, processed_dir: Path, output_dir: Path, solver_name: str, @@ -141,29 +150,38 @@ def _run_job( cancel_event=cancel_event, ) output_path = output_dir / "output.xlsx" + warmstart_output_path = output_dir / "warmstart.json" + optimization_cmd = [ + sys.executable, + "src/optimization/run_optimization.py", + "--data-dir", + str(processed_dir), + "--solver", + solver_name, + "--step-size-tonnes", + str(step_size_tonnes), + "--mip-gap", + str(mip_gap), + "--time-limit", + str(time_limit_seconds), + "--output-xlsx", + str(output_path), + "--warmstart-out", + str(warmstart_output_path), + ] + if warmstart_input_path is not None and warmstart_input_path.exists(): + optimization_cmd.extend(["--warmstart-in", str(warmstart_input_path)]) _run_step( - [ - sys.executable, - 
"src/optimization/run_optimization.py", - "--data-dir", - str(processed_dir), - "--solver", - solver_name, - "--step-size-tonnes", - str(step_size_tonnes), - "--mip-gap", - str(mip_gap), - "--time-limit", - str(time_limit_seconds), - "--output-xlsx", - str(output_path), - ], + optimization_cmd, logs_dir / "optimization.log", env=env, job_id=job_dir.name, cancel_event=cancel_event, ) - _write_status(job_dir, "completed", {"output": str(output_path)}) + extra = {"output": str(output_path)} + if warmstart_output_path.exists(): + extra["warmstart"] = str(warmstart_output_path) + _write_status(job_dir, "completed", extra) except JobCancelledError: _write_status(job_dir, "cancelled") except Exception as exc: @@ -342,6 +360,7 @@ def _read_capacity_timeseries(job_dir: Path) -> dict[str, object]: granularity: str, limit_value: float | None = None, limit_map: dict[tuple, float] | None = None, + marker_points: list[dict[str, object]] | None = None, ) -> dict[str, object]: points: list[dict[str, object]] = [] for row in df_usage.itertuples(index=False): @@ -379,7 +398,13 @@ def _read_capacity_timeseries(job_dir: Path) -> dict[str, object]: "utilization_pct": float(util) if util is not None else None, } ) - return {"id": series_id, "label": label, "granularity": granularity, "points": points} + return { + "id": series_id, + "label": label, + "granularity": granularity, + "points": points, + "marker_points": marker_points or [], + } def _aggregate_usage( *, @@ -562,6 +587,14 @@ def _read_capacity_timeseries(job_dir: Path) -> dict[str, object]: if avail_file.exists(): avail = pd.read_parquet(avail_file) shift_map = {"S1": "F", "S2": "S", "S3": "N"} + static_shift_caps = { + "welzow": _cap_lookup("Welzow-Süd", "pro Schicht"), + "rw_no": _cap_lookup("Boxberg (RW+NO)", "pro Schicht"), + } + marker_target_series = { + "welzow": "verladung_welzow_shift", + "rw_no": "verladung_boxberg_shift", + } for scope, label, cols, sources in [ ( @@ -593,18 +626,26 @@ def _read_capacity_timeseries(job_dir: Path) -> dict[str, object]: if not limit_map: continue - usage_df = _aggregate_usage(sources=sources, granularity="shift") - if usage_df.empty: - continue - verladung_series.append( - _series_from_dataframe( - series_id=f"availability_{scope}_shift", - label=label, - df_usage=usage_df, - granularity="shift", - limit_map=limit_map, - ) - ) + static_cap = static_shift_caps.get(scope) + marker_points: list[dict[str, object]] = [] + if static_cap is not None: + for (date, shift), dynamic_cap in sorted(limit_map.items()): + if dynamic_cap >= static_cap: + continue + marker_points.append( + { + "x": _point_x(date, shift=shift, granularity="shift"), + "label": _point_label(date, shift=shift, granularity="shift"), + "limit_tonnes": float(dynamic_cap), + "delta_tonnes": float(static_cap - dynamic_cap), + } + ) + target_series_id = marker_target_series.get(scope) + if target_series_id and marker_points: + for series in verladung_series: + if series.get("id") == target_series_id: + series["marker_points"] = marker_points + break if verladung_series: groups.append({"key": "verladung", "label": "Verladungskapazitäten", "series": verladung_series}) @@ -782,17 +823,27 @@ def _read_capacity_timeseries(job_dir: Path) -> dict[str, object]: continue limit_map[(date, shift)] = float(val) - usage_df = _aggregate_usage(targets={"J"}, granularity="shift") - if limit_map and not usage_df.empty: - zug_series.append( - _series_from_dataframe( - series_id="zug_kvb_nord_dynamic_j", - label="Zugdurchlass KVB Nord (dynamisch) -> KW Jänschwalde", - 
@@ -782,17 +823,27 @@ def _read_capacity_timeseries(job_dir: Path) -> dict[str, object]:
                 continue
             limit_map[(date, shift)] = float(val)
 
-        usage_df = _aggregate_usage(targets={"J"}, granularity="shift")
-        if limit_map and not usage_df.empty:
-            zug_series.append(
-                _series_from_dataframe(
-                    series_id="zug_kvb_nord_dynamic_j",
-                    label="Zugdurchlass KVB Nord (dynamisch) -> KW Jänschwalde",
-                    df_usage=usage_df,
-                    granularity="shift",
-                    limit_map=limit_map,
-                )
-            )
+        static_cap_j = _zug_cap("KUP + KLP", "KW Jänschwalde")
+        marker_points: list[dict[str, object]] = []
+        if limit_map and static_cap_j is not None:
+            for (date, shift), dynamic_cap in sorted(limit_map.items()):
+                if dynamic_cap >= static_cap_j:
+                    continue
+                marker_points.append(
+                    {
+                        "x": _point_x(date, shift=shift, granularity="shift"),
+                        "label": _point_label(date, shift=shift, granularity="shift"),
+                        "limit_tonnes": float(dynamic_cap),
+                        "delta_tonnes": float(static_cap_j - dynamic_cap),
+                    }
+                )
+
+        if marker_points:
+            for series in zug_series:
+                if series.get("id") == "zug_all_to_j":
+                    existing = list(series.get("marker_points", []))
+                    series["marker_points"] = existing + marker_points
+                    break
 
     if zug_series:
         groups.append({"key": "zugdurchlass", "label": "Zugdurchlasskapazitäten", "series": zug_series})
@@ -808,14 +859,15 @@ def health() -> dict[str, object]:
 
 
 @app.post("/api/run")
 async def run(
     file: UploadFile = File(...),
+    warmstart_file: UploadFile | None = File(None),
     solver: str = Form("highs"),
     step_size_tonnes: int = Form(1000),
-    mip_gap_pct: float = Form(5.0),
+    mip_gap_pct: float = Form(30.0),
     max_runtime_minutes: float = Form(10.0),
 ) -> dict[str, str]:
     solver = solver.lower().strip()
-    if solver not in {"highs", "gurobi", "scip"}:
-        raise HTTPException(status_code=400, detail="Unsupported solver")
+    if solver != "highs":
+        raise HTTPException(status_code=400, detail="Only HiGHS is enabled at the moment")
     availability = _get_solver_availability()
     if not availability.get(solver, False):
         raise HTTPException(status_code=400, detail=f"Solver not available: {solver}")
@@ -832,14 +884,21 @@ async def run(
     input_dir = job_dir / "input"
     processed_dir = job_dir / "processed"
     output_dir = job_dir / "output"
+    warmstart_dir = job_dir / "warmstart"
     input_dir.mkdir(parents=True, exist_ok=True)
     processed_dir.mkdir(parents=True, exist_ok=True)
     output_dir.mkdir(parents=True, exist_ok=True)
+    warmstart_dir.mkdir(parents=True, exist_ok=True)
 
     input_path = input_dir / "PoC1_Rohkohleverteilung_Input_Parameter.xlsx"
     with input_path.open("wb") as buffer:
         shutil.copyfileobj(file.file, buffer)
+    warmstart_input_path = None
+    if warmstart_file is not None and warmstart_file.filename:
+        warmstart_input_path = warmstart_dir / "warmstart.json"
+        with warmstart_input_path.open("wb") as buffer:
+            shutil.copyfileobj(warmstart_file.file, buffer)
 
     cancel_event = threading.Event()
     with ACTIVE_JOBS_LOCK:
@@ -853,6 +912,7 @@ async def run(
             "step_size_tonnes": str(step_size_tonnes),
             "mip_gap_pct": str(float(mip_gap_pct)),
             "max_runtime_minutes": str(float(max_runtime_minutes)),
+            "warmstart_used": "true" if warmstart_input_path is not None else "false",
         },
     )
     thread = threading.Thread(
@@ -860,6 +920,7 @@ async def run(
         args=(
             job_dir,
             input_path,
+            warmstart_input_path,
             processed_dir,
             output_dir,
             solver,
@@ -875,10 +936,12 @@ async def run(
     return {
         "job_id": job_id,
         "download_url": f"/api/jobs/{job_id}/output",
+        "warmstart_download_url": f"/api/jobs/{job_id}/warmstart",
         "solver": solver,
         "step_size_tonnes": str(step_size_tonnes),
         "mip_gap_pct": str(float(mip_gap_pct)),
         "max_runtime_minutes": str(float(max_runtime_minutes)),
+        "warmstart_used": "true" if warmstart_input_path is not None else "false",
     }
 
 
@@ -929,6 +992,14 @@ def job_output(job_id: str) -> FileResponse:
     return FileResponse(output_path, filename="output.xlsx")
 
 
+@app.get("/api/jobs/{job_id}/warmstart")
+def job_warmstart(job_id: str) -> FileResponse:
+    warmstart_path = JOBS_DIR / job_id / "output" / "warmstart.json"
+    if not warmstart_path.exists():
+        raise HTTPException(status_code=404, detail="Warmstart output not found")
+    return FileResponse(warmstart_path, filename="warmstart.json")
+
+
 @app.get("/api/jobs/{job_id}/monthly-flows")
 def job_monthly_flows(job_id: str) -> dict[str, object]:
     output_path = JOBS_DIR / job_id / "output" / "output.xlsx"
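
Note: /api/run now accepts the optional warmstart_file upload, and GET /api/jobs/{job_id}/warmstart serves the snapshot of a completed job. A minimal client sketch; the host/port and the requests package are assumptions, not part of this patch:

    import requests

    files = {
        "file": open("PoC1_Rohkohleverteilung_Input_Parameter.xlsx", "rb"),
        "warmstart_file": open("warmstart.json", "rb"),
    }
    resp = requests.post(
        "http://localhost:8000/api/run",
        files=files,
        data={"solver": "highs", "mip_gap_pct": "30.0"},
    )
    job = resp.json()

    # Once the job status is "completed", fetch the reusable snapshot:
    snap = requests.get(f"http://localhost:8000/api/jobs/{job['job_id']}/warmstart")
    with open("warmstart_next.json", "wb") as out:
        out.write(snap.content)
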
diff --git a/webapp/frontend/src/App.jsx b/webapp/frontend/src/App.jsx
index f7c1405..e07e6db 100644
--- a/webapp/frontend/src/App.jsx
+++ b/webapp/frontend/src/App.jsx
@@ -160,8 +160,10 @@ function parseSolverProgress(logText, solverName) {
 
 export default function App() {
   const [file, setFile] = useState(null);
+  const [warmstartFile, setWarmstartFile] = useState(null);
   const [status, setStatus] = useState("Bereit für Upload");
   const [downloadUrl, setDownloadUrl] = useState("");
+  const [warmstartDownloadUrl, setWarmstartDownloadUrl] = useState("");
   const [jobId, setJobId] = useState("");
   const [logText, setLogText] = useState("");
   const [jobState, setJobState] = useState("");
@@ -173,18 +175,18 @@ export default function App() {
   const [selectedCapacitySeries, setSelectedCapacitySeries] = useState({});
   const [solver, setSolver] = useState("highs");
   const [stepSizeTonnes, setStepSizeTonnes] = useState("1000");
-  const [mipGapPct, setMipGapPct] = useState("5");
+  const [mipGapPct, setMipGapPct] = useState("30");
   const [maxRuntimeMinutes, setMaxRuntimeMinutes] = useState("10.0");
   const [availableSolvers, setAvailableSolvers] = useState({
     highs: true,
     gurobi: false,
-    scip: false,
   });
   const [theme, setTheme] = useState("dark");
   const [jobSolver, setJobSolver] = useState("");
   const [jobStepSizeTonnes, setJobStepSizeTonnes] = useState("");
   const [jobMipGapPct, setJobMipGapPct] = useState("");
   const [jobMaxRuntimeMinutes, setJobMaxRuntimeMinutes] = useState("");
+  const [jobWarmstartUsed, setJobWarmstartUsed] = useState("");
   const [elapsedSeconds, setElapsedSeconds] = useState(0);
   const [timerRunning, setTimerRunning] = useState(false);
   const [cancelPending, setCancelPending] = useState(false);
@@ -205,6 +207,7 @@ export default function App() {
 
     setStatus("Upload läuft…");
     setDownloadUrl("");
+    setWarmstartDownloadUrl("");
     setJobId("");
     setLogText("");
     setJobState("");
@@ -218,12 +221,16 @@ export default function App() {
     setJobStepSizeTonnes("");
     setJobMipGapPct("");
     setJobMaxRuntimeMinutes("");
+    setJobWarmstartUsed("");
     setElapsedSeconds(0);
     setTimerRunning(true);
     setCancelPending(false);
 
     const formData = new FormData();
     formData.append("file", file);
+    if (warmstartFile) {
+      formData.append("warmstart_file", warmstartFile);
+    }
     formData.append("solver", solver);
     formData.append("step_size_tonnes", stepSizeTonnes);
     formData.append("mip_gap_pct", mipGapPct);
@@ -245,6 +252,7 @@ export default function App() {
       setJobStepSizeTonnes(String(data.step_size_tonnes || stepSizeTonnes));
       setJobMipGapPct(String(data.mip_gap_pct || mipGapPct));
       setJobMaxRuntimeMinutes(String(data.max_runtime_minutes || maxRuntimeMinutes));
+      setJobWarmstartUsed(String(data.warmstart_used || ""));
      setStatus("Job gestartet. Warte auf Ergebnis…");
     } catch (error) {
       setStatus("Fehler: Job konnte nicht abgeschlossen werden.");
@@ -294,8 +302,12 @@
       if (data.max_runtime_minutes) {
         setJobMaxRuntimeMinutes(String(data.max_runtime_minutes));
       }
+      if (data.warmstart_used) {
+        setJobWarmstartUsed(String(data.warmstart_used));
+      }
       if (data.status === "completed") {
         setDownloadUrl(`${API_BASE}/api/jobs/${id}/output`);
+        setWarmstartDownloadUrl(`${API_BASE}/api/jobs/${id}/warmstart`);
         setStatus("Fertig. Ergebnis steht zum Download bereit.");
         setTimerRunning(false);
         setCancelPending(false);
@@ -364,14 +376,10 @@
       const reported = data?.solvers || {};
       const solvers = {
         highs: reported.highs !== false,
-        gurobi: reported.gurobi === true,
-        scip: reported.scip === true,
+        gurobi: false,
       };
       setAvailableSolvers(solvers);
-      if (
-        (solver === "gurobi" && !solvers.gurobi) ||
-        (solver === "scip" && !solvers.scip)
-      ) {
+      if (solver === "gurobi" && !solvers.gurobi) {
         setSolver("highs");
       }
     } catch {
@@ -672,10 +680,29 @@
         line: { color: plotTheme.limit, width: 2, dash: "dash" },
         hovertemplate: "%{x}<br>Grenze: %{y:.1f} kt",
       };
+      const markerPoints = series.marker_points || [];
+      const nonavailabilityTrace = markerPoints.length
+        ? {
+            type: "scatter",
+            mode: "markers",
+            name: "Kurzfristige Nichtverfügbarkeit",
+            x: markerPoints.map((p) => p.x),
+            y: markerPoints.map((p) => tonnesToKt(p.limit_tonnes)),
+            marker: {
+              size: 10,
+              color: plotTheme.limit,
+              symbol: "circle",
+              line: { width: 1, color: plotTheme.paper },
+            },
+            customdata: markerPoints.map((p) => [p.label, tonnesToKt(p.delta_tonnes)]),
+            hovertemplate:
+              "%{customdata[0]}<br>Reduzierte Grenze: %{y:.1f} kt<br>Ausfall: %{customdata[1]:.1f} kt",
+          }
+        : null;
       return {
         group,
         series,
-        data: [usageTrace, limitTrace],
+        data: [usageTrace, limitTrace, nonavailabilityTrace].filter(Boolean),
         layout: {
           paper_bgcolor: plotTheme.paper,
           plot_bgcolor: plotTheme.plot,
@@ -736,10 +763,20 @@
                 onChange={(event) => setFile(event.target.files?.[0] || null)}
               />
-              {file ? file.name : "PoC1_Rohkohleverteilung_Input_Parameter.xlsx auswählen"}
+              {file ? file.name : "Input"}
+
+
@@ -825,6 +863,9 @@

           <div>
             Max Laufzeit: {formatDecimalWithDot(jobMaxRuntimeMinutes || maxRuntimeMinutes)} min
           </div>
+          <div>
+            Warmstart: {jobWarmstartUsed === "true" ? "verwendet" : "nicht verwendet"}
+          </div>
       {(jobId || logText) && (
@@ -855,9 +896,14 @@
       )}
       {downloadUrl && (
-        <a href={downloadUrl} download>
-          Ergebnis herunterladen
-        </a>
+        <>
+          <a href={downloadUrl} download>
+            Ergebnis herunterladen
+          </a>
+          <a href={warmstartDownloadUrl} download>
+            Warmstart herunterladen
+          </a>
+        </>
       )}
       {logText && (