changes
This commit is contained in:
@@ -221,6 +221,16 @@ const WORK_ORDER_TEMPLATE_HEADERS = [
|
||||
"Active Cavities",
|
||||
] as const;
|
||||
|
||||
const WORK_ORDER_TEMPLATE_EXAMPLE_ROW = [
|
||||
"*borra esta fila al subir excel)",
|
||||
"SKU-12345",
|
||||
35,
|
||||
10000,
|
||||
"MOLD-01",
|
||||
8,
|
||||
8,
|
||||
] as const;
|
||||
|
||||
function normalizeKey(value: string) {
|
||||
return value.toLowerCase().replace(/[^a-z0-9]/g, "");
|
||||
}
|
||||
@@ -654,7 +664,10 @@ export default function MachineDetailClient() {
|
||||
async function downloadWorkOrderTemplate() {
|
||||
const xlsx = await import("xlsx");
|
||||
const wb = xlsx.utils.book_new();
|
||||
const ws = xlsx.utils.aoa_to_sheet([Array.from(WORK_ORDER_TEMPLATE_HEADERS)]);
|
||||
const ws = xlsx.utils.aoa_to_sheet([
|
||||
Array.from(WORK_ORDER_TEMPLATE_HEADERS),
|
||||
Array.from(WORK_ORDER_TEMPLATE_EXAMPLE_ROW),
|
||||
]);
|
||||
xlsx.utils.book_append_sheet(wb, ws, "Work Orders");
|
||||
const wbout = xlsx.write(wb, { bookType: "xlsx", type: "array" });
|
||||
const blob = new Blob([wbout], {
|
||||
|
||||
@@ -21,7 +21,7 @@ type SimpleTooltipProps<T> = {
|
||||
label?: string | number;
|
||||
};
|
||||
|
||||
type ChartPoint = { ts: string; label: string; value: number };
|
||||
type ChartPoint = { ts: string; label: string; value: number | null };
|
||||
type CycleHistogramRow = {
|
||||
label: string;
|
||||
count: number;
|
||||
@@ -135,7 +135,14 @@ export default function ReportsCharts({
|
||||
"OEE",
|
||||
]}
|
||||
/>
|
||||
<Line type="monotone" dataKey="value" stroke="#34d399" dot={false} strokeWidth={2} />
|
||||
<Line
|
||||
type="linear"
|
||||
dataKey="value"
|
||||
stroke="#34d399"
|
||||
dot={false}
|
||||
strokeWidth={2}
|
||||
connectNulls={false}
|
||||
/>
|
||||
</LineChart>
|
||||
</ResponsiveContainer>
|
||||
) : (
|
||||
|
||||
@@ -29,7 +29,7 @@ type ReportDowntime = {
|
||||
oeeDropCount: number;
|
||||
};
|
||||
|
||||
type ReportTrendPoint = { t: string; v: number };
|
||||
type ReportTrendPoint = { t: string; v: number | null };
|
||||
|
||||
type ReportPayload = {
|
||||
summary: ReportSummary;
|
||||
@@ -78,6 +78,31 @@ function downsample<T>(rows: T[], max: number) {
|
||||
return rows.filter((_, idx) => idx % step === 0);
|
||||
}
|
||||
|
||||
function downsampleTrendPreserveGaps(rows: ReportTrendPoint[], max: number) {
|
||||
if (rows.length <= max) return rows;
|
||||
const step = Math.ceil(rows.length / max);
|
||||
const picked = new Set<number>();
|
||||
|
||||
picked.add(0);
|
||||
picked.add(rows.length - 1);
|
||||
for (let idx = 0; idx < rows.length; idx += step) picked.add(idx);
|
||||
|
||||
// Keep both sides of null/non-null transitions so chart gaps remain visible.
|
||||
for (let idx = 1; idx < rows.length; idx += 1) {
|
||||
const prevIsNull = rows[idx - 1]?.v == null;
|
||||
const currIsNull = rows[idx]?.v == null;
|
||||
if (prevIsNull !== currIsNull) {
|
||||
picked.add(idx - 1);
|
||||
picked.add(idx);
|
||||
}
|
||||
}
|
||||
|
||||
return [...picked]
|
||||
.sort((a, b) => a - b)
|
||||
.map((idx) => rows[idx])
|
||||
.filter((row): row is ReportTrendPoint => row != null);
|
||||
}
|
||||
|
||||
function formatTickLabel(ts: string, range: RangeKey) {
|
||||
const d = new Date(ts);
|
||||
if (Number.isNaN(d.getTime())) return ts;
|
||||
@@ -107,7 +132,7 @@ function ReportsChartsSkeleton() {
|
||||
}
|
||||
|
||||
function buildCsv(report: ReportPayload, t: Translator) {
|
||||
const rows = new Map<string, Record<string, string | number>>();
|
||||
const rows = new Map<string, Record<string, string | number | null>>();
|
||||
const addSeries = (series: ReportTrendPoint[], key: string) => {
|
||||
for (const p of series) {
|
||||
const row = rows.get(p.t) ?? { timestamp: p.t };
|
||||
@@ -414,7 +439,7 @@ export default function ReportsPageClient({
|
||||
|
||||
const oeeSeries = useMemo(() => {
|
||||
const rows = trend?.oee ?? [];
|
||||
const trimmed = downsample(rows, 600);
|
||||
const trimmed = downsampleTrendPreserveGaps(rows, 600);
|
||||
return trimmed.map((p) => ({
|
||||
ts: p.t,
|
||||
label: formatTickLabel(p.t, range),
|
||||
|
||||
@@ -3,6 +3,13 @@ import { prisma } from "@/lib/prisma";
|
||||
import { requireSession } from "@/lib/auth/requireSession";
|
||||
import { coerceDowntimeRange, rangeToStart } from "@/lib/analytics/downtimeRange";
|
||||
import type { Prisma } from "@prisma/client";
|
||||
import {
|
||||
applyDowntimeFilters,
|
||||
loadDowntimeShiftContext,
|
||||
normalizeMicrostopLtMin,
|
||||
normalizeShiftFilter,
|
||||
resolvePlannedFilter,
|
||||
} from "@/lib/analytics/downtimeFilters";
|
||||
|
||||
const bad = (status: number, error: string) =>
|
||||
NextResponse.json({ ok: false, error }, { status });
|
||||
@@ -26,6 +33,9 @@ export async function GET(req: Request) {
|
||||
const machineId = url.searchParams.get("machineId"); // optional
|
||||
const reasonCode = url.searchParams.get("reasonCode"); // optional
|
||||
const includeMoldChange = url.searchParams.get("includeMoldChange") === "true";
|
||||
const planned = resolvePlannedFilter(url.searchParams.get("planned"), includeMoldChange);
|
||||
const shift = normalizeShiftFilter(url.searchParams.get("shift"));
|
||||
const microstopLtMin = normalizeMicrostopLtMin(url.searchParams.get("microstopLtMin"));
|
||||
|
||||
const limitRaw = url.searchParams.get("limit");
|
||||
const limit = Math.min(Math.max(Number(limitRaw || 200), 1), 500);
|
||||
@@ -50,7 +60,6 @@ export async function GET(req: Request) {
|
||||
orgId,
|
||||
kind: "downtime",
|
||||
episodeId: { not: null },
|
||||
...(includeMoldChange ? {} : { reasonCode: { not: "MOLD_CHANGE" } }),
|
||||
capturedAt: {
|
||||
gte: start,
|
||||
...(beforeDate ? { lt: beforeDate } : {}),
|
||||
@@ -59,10 +68,11 @@ export async function GET(req: Request) {
|
||||
...(reasonCode ? { reasonCode } : {}),
|
||||
};
|
||||
|
||||
const rows = await prisma.reasonEntry.findMany({
|
||||
const scanTake = Math.min(Math.max(limit * 8, 1000), 5000);
|
||||
const rowsRaw = await prisma.reasonEntry.findMany({
|
||||
where,
|
||||
orderBy: { capturedAt: "desc" },
|
||||
take: limit,
|
||||
take: scanTake,
|
||||
select: {
|
||||
id: true,
|
||||
episodeId: true,
|
||||
@@ -80,6 +90,14 @@ export async function GET(req: Request) {
|
||||
},
|
||||
});
|
||||
|
||||
const shiftContext = shift === "all" ? null : await loadDowntimeShiftContext(orgId);
|
||||
const rows = applyDowntimeFilters(rowsRaw, {
|
||||
planned,
|
||||
shift,
|
||||
microstopLtMin,
|
||||
shiftContext,
|
||||
}).slice(0, limit);
|
||||
|
||||
const events = rows.map((r) => {
|
||||
const startAt = r.capturedAt;
|
||||
const endAt =
|
||||
@@ -116,7 +134,11 @@ export async function GET(req: Request) {
|
||||
});
|
||||
|
||||
const nextBefore =
|
||||
events.length > 0 ? events[events.length - 1]?.capturedAt ?? null : null;
|
||||
events.length > 0
|
||||
? events[events.length - 1]?.capturedAt ?? null
|
||||
: rowsRaw.length > 0
|
||||
? toISO(rowsRaw[rowsRaw.length - 1]?.capturedAt)
|
||||
: null;
|
||||
|
||||
return NextResponse.json({
|
||||
ok: true,
|
||||
@@ -125,6 +147,9 @@ export async function GET(req: Request) {
|
||||
start,
|
||||
machineId: machineId ?? null,
|
||||
reasonCode: reasonCode ?? null,
|
||||
planned,
|
||||
shift,
|
||||
microstopLtMin,
|
||||
includeMoldChange,
|
||||
limit,
|
||||
before: before ?? null,
|
||||
|
||||
@@ -2,6 +2,13 @@ import { NextResponse } from "next/server";
|
||||
import { prisma } from "@/lib/prisma";
|
||||
import { requireSession } from "@/lib/auth/requireSession";
|
||||
import { coerceDowntimeRange, rangeToStart } from "@/lib/analytics/downtimeRange";
|
||||
import {
|
||||
applyDowntimeFilters,
|
||||
loadDowntimeShiftContext,
|
||||
normalizeMicrostopLtMin,
|
||||
normalizeShiftFilter,
|
||||
resolvePlannedFilter,
|
||||
} from "@/lib/analytics/downtimeFilters";
|
||||
|
||||
const bad = (status: number, error: string) =>
|
||||
NextResponse.json({ ok: false, error }, { status });
|
||||
@@ -21,6 +28,9 @@ export async function GET(req: Request) {
|
||||
const machineId = url.searchParams.get("machineId"); // optional
|
||||
const kind = (url.searchParams.get("kind") || "downtime").toLowerCase();
|
||||
const includeMoldChange = url.searchParams.get("includeMoldChange") === "true";
|
||||
const planned = resolvePlannedFilter(url.searchParams.get("planned"), includeMoldChange);
|
||||
const shift = normalizeShiftFilter(url.searchParams.get("shift"));
|
||||
const microstopLtMin = normalizeMicrostopLtMin(url.searchParams.get("microstopLtMin"));
|
||||
|
||||
if (kind !== "downtime" && kind !== "scrap" && kind !== "planned-downtime") {
|
||||
return bad(400, "Invalid kind (downtime|scrap|planned-downtime)");
|
||||
@@ -35,41 +45,82 @@ export async function GET(req: Request) {
|
||||
if (!m) return bad(404, "Machine not found");
|
||||
}
|
||||
|
||||
// ✅ Scope by orgId (+ machineId if provided)
|
||||
const grouped = await prisma.reasonEntry.groupBy({
|
||||
by: ["reasonCode", "reasonLabel"],
|
||||
where: {
|
||||
orgId,
|
||||
...(machineId ? { machineId } : {}),
|
||||
kind: kind === "planned-downtime" ? "downtime" : kind,
|
||||
...(kind === "downtime" && !includeMoldChange ? { reasonCode: { not: "MOLD_CHANGE" } } : {}),
|
||||
...(kind === "planned-downtime" ? { reasonCode: "MOLD_CHANGE" } : {}),
|
||||
capturedAt: { gte: start },
|
||||
},
|
||||
_sum: {
|
||||
durationSeconds: true,
|
||||
scrapQty: true,
|
||||
},
|
||||
_count: { _all: true },
|
||||
});
|
||||
let itemsRaw: { reasonCode: string; reasonLabel: string; value: number; count: number }[] = [];
|
||||
|
||||
const itemsRaw = grouped
|
||||
.map((g) => {
|
||||
const value =
|
||||
kind === "downtime" || kind === "planned-downtime"
|
||||
? Math.round(((g._sum.durationSeconds ?? 0) / 60) * 10) / 10 // minutes, 1 decimal
|
||||
: g._sum.scrapQty ?? 0;
|
||||
if (kind === "downtime" || kind === "planned-downtime") {
|
||||
const baseRows = await prisma.reasonEntry.findMany({
|
||||
where: {
|
||||
orgId,
|
||||
...(machineId ? { machineId } : {}),
|
||||
kind: "downtime",
|
||||
capturedAt: { gte: start },
|
||||
},
|
||||
select: {
|
||||
reasonCode: true,
|
||||
reasonLabel: true,
|
||||
durationSeconds: true,
|
||||
capturedAt: true,
|
||||
meta: true,
|
||||
episodeId: true,
|
||||
},
|
||||
});
|
||||
|
||||
return {
|
||||
const effectivePlanned = kind === "planned-downtime" ? "planned" : planned;
|
||||
const shiftContext = shift === "all" ? null : await loadDowntimeShiftContext(orgId);
|
||||
const filteredRows = applyDowntimeFilters(baseRows, {
|
||||
planned: effectivePlanned,
|
||||
shift,
|
||||
microstopLtMin,
|
||||
shiftContext,
|
||||
});
|
||||
|
||||
const grouped = new Map<string, { reasonCode: string; reasonLabel: string; durationSeconds: number; count: number }>();
|
||||
for (const row of filteredRows) {
|
||||
const key = `${row.reasonCode}:::${row.reasonLabel ?? row.reasonCode}`;
|
||||
const slot =
|
||||
grouped.get(key) ??
|
||||
{
|
||||
reasonCode: row.reasonCode,
|
||||
reasonLabel: row.reasonLabel ?? row.reasonCode,
|
||||
durationSeconds: 0,
|
||||
count: 0,
|
||||
};
|
||||
slot.durationSeconds += Math.max(0, row.durationSeconds ?? 0);
|
||||
slot.count += 1;
|
||||
grouped.set(key, slot);
|
||||
}
|
||||
|
||||
itemsRaw = [...grouped.values()]
|
||||
.map((g) => ({
|
||||
reasonCode: g.reasonCode,
|
||||
reasonLabel: g.reasonLabel,
|
||||
value: Math.round((g.durationSeconds / 60) * 10) / 10,
|
||||
count: g.count,
|
||||
}))
|
||||
.filter((x) => x.value > 0 || x.count > 0);
|
||||
} else {
|
||||
// Scrap path unchanged.
|
||||
const grouped = await prisma.reasonEntry.groupBy({
|
||||
by: ["reasonCode", "reasonLabel"],
|
||||
where: {
|
||||
orgId,
|
||||
...(machineId ? { machineId } : {}),
|
||||
kind,
|
||||
capturedAt: { gte: start },
|
||||
},
|
||||
_sum: { scrapQty: true },
|
||||
_count: { _all: true },
|
||||
});
|
||||
|
||||
itemsRaw = grouped
|
||||
.map((g) => ({
|
||||
reasonCode: g.reasonCode,
|
||||
reasonLabel: g.reasonLabel ?? g.reasonCode,
|
||||
value,
|
||||
value: g._sum.scrapQty ?? 0,
|
||||
count: g._count._all,
|
||||
};
|
||||
})
|
||||
.filter((x) =>
|
||||
kind === "downtime" || kind === "planned-downtime" ? x.value > 0 || x.count > 0 : x.value > 0
|
||||
);
|
||||
}))
|
||||
.filter((x) => x.value > 0);
|
||||
}
|
||||
|
||||
itemsRaw.sort((a, b) => b.value - a.value);
|
||||
|
||||
@@ -111,6 +162,9 @@ export async function GET(req: Request) {
|
||||
orgId,
|
||||
machineId: machineId ?? null,
|
||||
kind,
|
||||
planned: kind === "downtime" ? planned : kind === "planned-downtime" ? "planned" : "all",
|
||||
shift,
|
||||
microstopLtMin,
|
||||
includeMoldChange,
|
||||
range, // ✅ now defined correctly
|
||||
start, // ✅ now defined correctly
|
||||
|
||||
@@ -517,6 +517,14 @@ export async function POST(req: Request) {
|
||||
if (evRecord.is_update || evRecord.is_auto_ack || dataObj.is_update || dataObj.is_auto_ack){
|
||||
// skip duplicate reasonEntry for refresh/ack
|
||||
} else if (evReason || finalType === "microstop" || finalType === "macrostop" || finalType === "downtime-acknowledged" || finalType === "mold-change"){
|
||||
const fallbackIncidentKey =
|
||||
clampText(
|
||||
evData.incidentKey ??
|
||||
dataObj.incidentKey ??
|
||||
evDowntime?.incidentKey ??
|
||||
evReason?.incidentKey,
|
||||
128
|
||||
) ?? null;
|
||||
const moldIncidentKey =
|
||||
clampText(evData.incidentKey ?? dataObj.incidentKey, 128) ??
|
||||
(numberFrom(evData.start_ms ?? dataObj.start_ms) != null
|
||||
@@ -533,7 +541,7 @@ export async function POST(req: Request) {
|
||||
detailLabel: "Cambio molde",
|
||||
reasonCode: "MOLD_CHANGE",
|
||||
reasonText: "Cambio molde",
|
||||
incidentKey: moldIncidentKey ?? row.id,
|
||||
incidentKey: moldIncidentKey ?? fallbackIncidentKey ?? row.id,
|
||||
} as Record<string, unknown>)
|
||||
:
|
||||
({
|
||||
@@ -544,7 +552,7 @@ export async function POST(req: Request) {
|
||||
detailLabel: "Unclassified",
|
||||
reasonCode: "UNCLASSIFIED",
|
||||
reasonText: "Unclassified",
|
||||
incidentKey: row.id,
|
||||
incidentKey: fallbackIncidentKey ?? row.id,
|
||||
} as Record<string, unknown>));
|
||||
|
||||
const inferredKind: ReasonCatalogKind =
|
||||
@@ -554,10 +562,18 @@ export async function POST(req: Request) {
|
||||
const resolved = resolveReason(reasonRaw, inferredKind, reasonCatalog, reasonCatalog.version);
|
||||
|
||||
if (resolved.reasonCode) {
|
||||
const continuityIncidentKey =
|
||||
inferredKind === "downtime"
|
||||
? clampText((reasonRaw as any).incidentKey ?? evDowntime?.incidentKey ?? fallbackIncidentKey, 128) ?? row.id
|
||||
: null;
|
||||
const reasonMetaIncidentKey =
|
||||
inferredKind === "downtime"
|
||||
? continuityIncidentKey
|
||||
: clampText((reasonRaw as any).incidentKey ?? evDowntime?.incidentKey, 128);
|
||||
const reasonId =
|
||||
clampText(reasonRaw.reasonId, 128) ??
|
||||
(inferredKind === "downtime"
|
||||
? `evt:${machine.id}:downtime:${clampText((reasonRaw as any).incidentKey ?? evDowntime?.incidentKey, 128) ?? row.id}`
|
||||
? `evt:${machine.id}:downtime:${continuityIncidentKey ?? row.id}`
|
||||
: `evt:${machine.id}:scrap:${clampText(reasonRaw.scrapEntryId, 128) ?? row.id}`);
|
||||
|
||||
const workOrderId =
|
||||
@@ -577,7 +593,7 @@ export async function POST(req: Request) {
|
||||
source: "ingest:event",
|
||||
eventId: row.id,
|
||||
eventType: row.eventType,
|
||||
incidentKey: clampText((reasonRaw as any).incidentKey ?? evDowntime?.incidentKey, 128),
|
||||
incidentKey: reasonMetaIncidentKey,
|
||||
anomalyType:
|
||||
clampText(evRecord.anomalyType, 64) ??
|
||||
clampText(evDowntime?.anomalyType, 64) ??
|
||||
@@ -595,7 +611,7 @@ export async function POST(req: Request) {
|
||||
};
|
||||
|
||||
if (inferredKind === "downtime") {
|
||||
const incidentKey = clampText((reasonRaw as any).incidentKey ?? evDowntime?.incidentKey, 128) ?? row.id;
|
||||
const incidentKey = continuityIncidentKey ?? row.id;
|
||||
const durationSeconds =
|
||||
numberFrom(evDowntime?.durationSeconds) ??
|
||||
numberFrom(evData.duration_sec) ??
|
||||
@@ -641,7 +657,7 @@ export async function POST(req: Request) {
|
||||
source: "ingest:event",
|
||||
eventId: row.id,
|
||||
eventType: row.eventType,
|
||||
incidentKey: clampText((reasonRaw as any).incidentKey ?? evDowntime?.incidentKey, 128),
|
||||
incidentKey: reasonMetaIncidentKey,
|
||||
anomalyType:
|
||||
clampText(evRecord.anomalyType, 64) ??
|
||||
clampText(evDowntime?.anomalyType, 64) ??
|
||||
|
||||
@@ -47,6 +47,10 @@ function safeNum(v: unknown) {
|
||||
return typeof v === "number" && Number.isFinite(v) ? v : null;
|
||||
}
|
||||
|
||||
function isProductionSnapshot(trackingEnabled: unknown, productionStarted: unknown) {
|
||||
return trackingEnabled === true && productionStarted === true;
|
||||
}
|
||||
|
||||
function toMs(value?: Date | null) {
|
||||
return value ? value.getTime() : 0;
|
||||
}
|
||||
@@ -137,6 +141,8 @@ export async function GET(req: NextRequest) {
|
||||
good: true,
|
||||
scrap: true,
|
||||
target: true,
|
||||
trackingEnabled: true,
|
||||
productionStarted: true,
|
||||
machineId: true,
|
||||
},
|
||||
});
|
||||
@@ -151,7 +157,9 @@ export async function GET(req: NextRequest) {
|
||||
let qualSum = 0;
|
||||
let qualCount = 0;
|
||||
|
||||
// OEE-family summaries are production-only to avoid mixing downtime/off windows.
|
||||
for (const k of kpiRows) {
|
||||
if (!isProductionSnapshot(k.trackingEnabled, k.productionStarted)) continue;
|
||||
if (safeNum(k.oee) != null) {
|
||||
oeeSum += Number(k.oee);
|
||||
oeeCount += 1;
|
||||
@@ -274,7 +282,7 @@ export async function GET(req: NextRequest) {
|
||||
else if (type === "oee-drop") oeeDropCount += 1;
|
||||
}
|
||||
|
||||
type TrendPoint = { t: string; v: number };
|
||||
type TrendPoint = { t: string; v: number | null };
|
||||
|
||||
const trend: {
|
||||
oee: TrendPoint[];
|
||||
@@ -292,10 +300,18 @@ export async function GET(req: NextRequest) {
|
||||
|
||||
for (const k of kpiRows) {
|
||||
const t = k.ts.toISOString();
|
||||
if (safeNum(k.oee) != null) trend.oee.push({ t, v: Number(k.oee) });
|
||||
if (safeNum(k.availability) != null) trend.availability.push({ t, v: Number(k.availability) });
|
||||
if (safeNum(k.performance) != null) trend.performance.push({ t, v: Number(k.performance) });
|
||||
if (safeNum(k.quality) != null) trend.quality.push({ t, v: Number(k.quality) });
|
||||
if (!isProductionSnapshot(k.trackingEnabled, k.productionStarted)) {
|
||||
// Preserve timeline gaps across non-production windows for OEE-family charting.
|
||||
trend.oee.push({ t, v: null });
|
||||
trend.availability.push({ t, v: null });
|
||||
trend.performance.push({ t, v: null });
|
||||
trend.quality.push({ t, v: null });
|
||||
} else {
|
||||
trend.oee.push({ t, v: safeNum(k.oee) != null ? Number(k.oee) : null });
|
||||
trend.availability.push({ t, v: safeNum(k.availability) != null ? Number(k.availability) : null });
|
||||
trend.performance.push({ t, v: safeNum(k.performance) != null ? Number(k.performance) : null });
|
||||
trend.quality.push({ t, v: safeNum(k.quality) != null ? Number(k.quality) : null });
|
||||
}
|
||||
|
||||
const good = safeNum(k.good);
|
||||
const scrap = safeNum(k.scrap);
|
||||
|
||||
@@ -246,18 +246,6 @@ function buildParetoFromEvents(events: ApiDowntimeEvent[]): ApiParetoRes | null
|
||||
}
|
||||
|
||||
|
||||
type ApiCoverageRes = {
|
||||
ok: boolean;
|
||||
error?: string;
|
||||
orgId?: string;
|
||||
machineId?: string | null;
|
||||
range?: "24h" | "7d" | "30d";
|
||||
start?: string;
|
||||
receivedEpisodes?: number;
|
||||
receivedMinutes?: number;
|
||||
note?: string;
|
||||
};
|
||||
|
||||
type Range = "24h" | "7d" | "30d";
|
||||
type Metric = "minutes" | "count";
|
||||
|
||||
@@ -1297,6 +1285,9 @@ export default function DowntimePageClient() {
|
||||
// client-only filters (shareable)
|
||||
const metric = ((sp.get("metric") as Metric) || "minutes") as Metric;
|
||||
const reasonCode = sp.get("reasonCode") || null;
|
||||
const shift = (sp.get("shift") || "all").toUpperCase();
|
||||
const planned = (sp.get("planned") as "all" | "planned" | "unplanned") || "all";
|
||||
const microstopLtMin = sp.get("microstopLtMin") || "2";
|
||||
|
||||
const hmDay = sp.get("hmDay");
|
||||
const hmHour = sp.get("hmHour");
|
||||
@@ -1308,7 +1299,6 @@ export default function DowntimePageClient() {
|
||||
|
||||
|
||||
const [pareto, setPareto] = useState<ApiParetoRes | null>(null);
|
||||
const [coverage, setCoverage] = useState<ApiCoverageRes | null>(null);
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [err, setErr] = useState<string | null>(null);
|
||||
const [eventsRes, setEventsRes] = useState<ApiDowntimeEventsRes | null>(null);
|
||||
@@ -1364,40 +1354,27 @@ export default function DowntimePageClient() {
|
||||
qs.set("kind", "downtime");
|
||||
qs.set("range", range);
|
||||
if (machineId) qs.set("machineId", machineId);
|
||||
qs.set("shift", shift);
|
||||
qs.set("planned", planned);
|
||||
qs.set("microstopLtMin", microstopLtMin);
|
||||
|
||||
const [r1, r2] = await Promise.all([
|
||||
fetch(`/api/analytics/pareto?${qs.toString()}`, {
|
||||
cache: "no-cache",
|
||||
credentials: "include",
|
||||
signal: ac.signal,
|
||||
}),
|
||||
fetch(`/api/analytics/coverage?${qs.toString()}`, {
|
||||
cache: "no-cache",
|
||||
credentials: "include",
|
||||
signal: ac.signal,
|
||||
}),
|
||||
]);
|
||||
const r1 = await fetch(`/api/analytics/pareto?${qs.toString()}`, {
|
||||
cache: "no-cache",
|
||||
credentials: "include",
|
||||
signal: ac.signal,
|
||||
});
|
||||
|
||||
const j1raw = (await r1.json().catch(() => ({}))) as ApiParetoRes;
|
||||
const j2 = (await r2.json().catch(() => ({}))) as ApiCoverageRes;
|
||||
|
||||
if (!alive) return;
|
||||
|
||||
if (!r1.ok || j1raw.ok === false) {
|
||||
setErr(j1raw?.error ?? "Failed to load pareto");
|
||||
setPareto(null);
|
||||
setCoverage(null);
|
||||
setLoading(false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!r2.ok || j2.ok === false) {
|
||||
// coverage is “nice to have” — don’t kill the page
|
||||
setCoverage(null);
|
||||
} else {
|
||||
setCoverage(j2);
|
||||
}
|
||||
|
||||
setPareto(normalizeParetoRes(j1raw));
|
||||
setLoading(false);
|
||||
} catch (e: any) {
|
||||
@@ -1412,7 +1389,7 @@ export default function DowntimePageClient() {
|
||||
alive = false;
|
||||
ac.abort();
|
||||
};
|
||||
}, [range, machineId]);
|
||||
}, [range, machineId, shift, planned, microstopLtMin]);
|
||||
|
||||
useEffect(() => {
|
||||
let alive = true;
|
||||
@@ -1462,6 +1439,9 @@ export default function DowntimePageClient() {
|
||||
qs.set("limit", String(eventsLimit));
|
||||
if (machineId) qs.set("machineId", machineId);
|
||||
if (reasonCode) qs.set("reasonCode", reasonCode);
|
||||
qs.set("shift", shift);
|
||||
qs.set("planned", planned);
|
||||
qs.set("microstopLtMin", microstopLtMin);
|
||||
if (eventsBefore) qs.set("before", eventsBefore);
|
||||
|
||||
const r = await fetch(`/api/analytics/downtime-events?${qs.toString()}`, {
|
||||
@@ -1494,7 +1474,7 @@ export default function DowntimePageClient() {
|
||||
alive = false;
|
||||
ac.abort();
|
||||
};
|
||||
}, [range, machineId, reasonCode, eventsLimit, eventsBefore]);
|
||||
}, [range, machineId, reasonCode, shift, planned, microstopLtMin, eventsLimit, eventsBefore]);
|
||||
|
||||
// Derived data
|
||||
const events = eventsRes?.events ?? [];
|
||||
@@ -1582,7 +1562,7 @@ const totalDowntimeMin = paretoEffective?.totalMinutesLost ?? 0;
|
||||
|
||||
useEffect(() => {
|
||||
setEventsBefore(null);
|
||||
}, [range, machineId, reasonCode]);
|
||||
}, [range, machineId, reasonCode, shift, planned, microstopLtMin]);
|
||||
|
||||
const filteredEvents = useMemo(() => {
|
||||
let list = events;
|
||||
@@ -1612,8 +1592,8 @@ const filteredEvents = useMemo(() => {
|
||||
|
||||
|
||||
|
||||
// Use distinct episodes as "stops" (best available now)
|
||||
const stops = coverage?.receivedEpisodes ?? totalStops;
|
||||
// Use filtered pareto totals so top filters always affect the KPI.
|
||||
const stops = totalStops;
|
||||
|
||||
// Window minutes for MTBF/Availability
|
||||
const windowMin =
|
||||
@@ -1728,11 +1708,6 @@ const estImpactMxn = rate > 0 ? totalDowntimeMin * rate : 0;
|
||||
);
|
||||
|
||||
|
||||
const shift = sp.get("shift") || "all";
|
||||
const planned = (sp.get("planned") as "all" | "planned" | "unplanned") || "all";
|
||||
const microstopLtMin = sp.get("microstopLtMin") || "2";
|
||||
|
||||
|
||||
const filtersRow = (
|
||||
<div className="mt-4 flex items-center justify-between gap-4">
|
||||
{/* LEFT: range + metric + reset (never wrap) */}
|
||||
@@ -2018,7 +1993,7 @@ const estImpactMxn = rate > 0 ? totalDowntimeMin * rate : 0;
|
||||
<KPI
|
||||
label="Stops count"
|
||||
value={fmtNum(stops, 0)}
|
||||
sub="Distinct episodes (coverage)"
|
||||
sub="Distinct episodes (filtered)"
|
||||
accent="zinc"
|
||||
/>
|
||||
<KPI
|
||||
@@ -2247,29 +2222,25 @@ const estImpactMxn = rate > 0 ? totalDowntimeMin * rate : 0;
|
||||
|
||||
{/* Coverage mini */}
|
||||
<div className="mt-4 rounded-2xl border border-white/10 bg-white/5 p-4">
|
||||
<div className="text-sm font-semibold text-white">Coverage received</div>
|
||||
<div className="text-sm font-semibold text-white">Filtered downtime summary</div>
|
||||
<div className="mt-1 text-xs text-zinc-400">
|
||||
Sync health from Control Tower ingest
|
||||
Reflects the active range/machine/shift/planned/microstop filters
|
||||
</div>
|
||||
|
||||
<div className="mt-3 grid grid-cols-2 gap-3">
|
||||
<div className="rounded-xl border border-white/10 bg-black/20 p-3">
|
||||
<div className="text-[11px] text-zinc-400">Episodes</div>
|
||||
<div className="mt-1 text-base font-semibold text-white">
|
||||
{coverage?.receivedEpisodes != null ? fmtNum(coverage.receivedEpisodes, 0) : "—"}
|
||||
{fmtNum(stops, 0)}
|
||||
</div>
|
||||
</div>
|
||||
<div className="rounded-xl border border-white/10 bg-black/20 p-3">
|
||||
<div className="text-[11px] text-zinc-400">Minutes</div>
|
||||
<div className="mt-1 text-base font-semibold text-white">
|
||||
{coverage?.receivedMinutes != null ? fmtNum(coverage.receivedMinutes, 1) : "—"}
|
||||
{fmtNum(totalDowntimeMin, 1)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{coverage?.note ? (
|
||||
<div className="mt-3 text-[11px] text-zinc-500">{coverage.note}</div>
|
||||
) : null}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
244
fix4.md
Normal file
244
fix4.md
Normal file
@@ -0,0 +1,244 @@
|
||||
Task: Implement Control Tower changes only (no Node-RED edits), then run full verification with SQL + backfill script.
|
||||
|
||||
Repository context:
|
||||
- Workspace root: Plastic-Dashboard
|
||||
- Target branch assumption: sandbox-main
|
||||
- Database: PostgreSQL via Prisma
|
||||
- Scope strictly limited to Control Tower code and scripts in this repo
|
||||
|
||||
Hard constraints:
|
||||
1. Do NOT edit any Node-RED flow files or Node-RED runtime code.
|
||||
2. Do NOT change behavior outside the requested areas unless required for correctness.
|
||||
3. Preserve existing non-authoritative guard behavior for downtime reasons (PENDIENTE / UNCLASSIFIED).
|
||||
4. Run verification before and after backfill, and report results clearly.
|
||||
5. If lint/test has unrelated pre-existing failures, do not refactor unrelated modules.
|
||||
|
||||
Implementation requirements:
|
||||
|
||||
A) Downtime continuity fallback key fix
|
||||
File:
|
||||
- app/api/ingest/event/route.ts
|
||||
|
||||
Goal:
|
||||
- Ensure fallback downtime reason identity/continuity uses episode continuity key (incidentKey) whenever present.
|
||||
- Use row.id only when incidentKey is truly absent.
|
||||
- Preserve guard that prevents non-authoritative values from overwriting authoritative manual reasons.
|
||||
|
||||
Details:
|
||||
1. In the event ingestion logic where ReasonEntry payload is created for downtime-like events (including fallback UNCLASSIFIED and mold-change):
|
||||
- Derive a fallbackIncidentKey from available payload fields in this preference order:
|
||||
- evData.incidentKey
|
||||
- dataObj.incidentKey
|
||||
- evDowntime?.incidentKey
|
||||
- evReason?.incidentKey (if available)
|
||||
- Only if all are missing, fallback to row.id.
|
||||
|
||||
2. For fallback reasonRaw objects:
|
||||
- For mold-change fallback, set incidentKey to moldIncidentKey ?? fallbackIncidentKey ?? row.id.
|
||||
- For unclassified fallback, set incidentKey to fallbackIncidentKey ?? row.id.
|
||||
|
||||
3. Create one continuityIncidentKey (single source of truth) used consistently for:
|
||||
- downtime reasonId construction (evt:<machineId>:downtime:<continuityIncidentKey>)
|
||||
- ReasonEntry episodeId for downtime
|
||||
- meta.incidentKey in reason entry writes
|
||||
- manual-preservation guard queries by episodeId
|
||||
|
||||
4. Keep non-authoritative guard semantics unchanged:
|
||||
- incoming non-authoritative reason should not overwrite existing authoritative reason for same episode
|
||||
- downtime-acknowledged/manual authoritative path remains preserved
|
||||
|
||||
B) OEE trend from production-only snapshots
|
||||
File:
|
||||
- app/api/reports/route.ts
|
||||
|
||||
Goal:
|
||||
- Build OEE trend from production-only snapshots:
|
||||
- trackingEnabled = true
|
||||
- productionStarted = true
|
||||
- Keep summary metrics behavior explicit and consistent with this filtering decision.
|
||||
|
||||
Details:
|
||||
1. Include trackingEnabled and productionStarted in KPI snapshot select.
|
||||
2. Add helper like isProductionSnapshot(trackingEnabled, productionStarted).
|
||||
3. Compute OEE/Availability/Performance/Quality averages using production-only rows.
|
||||
4. For trend generation:
|
||||
- Iterate timeline in ts order.
|
||||
- For non-production snapshots, emit null points (for OEE and related KPI lines) so chart can render true gaps.
|
||||
- For production snapshots, emit actual numeric values (or null if value is missing).
|
||||
5. Keep downtime/event aggregates and cycle-based totals behavior intact unless explicitly tied to OEE production-only requirement.
|
||||
6. Keep logic explicit in code comments (short, concrete comments only where needed).
|
||||
|
||||
C) Chart rendering behavior: no smoothing across gaps
|
||||
Files:
|
||||
- app/(app)/reports/ReportsCharts.tsx
|
||||
- app/(app)/reports/ReportsPageClient.tsx (if types/downsampling need updates)
|
||||
|
||||
Goal:
|
||||
- OEE line interpolation must be linear.
|
||||
- Gaps must be rendered as gaps (no fake continuity through filtered/non-production windows).
|
||||
|
||||
Details:
|
||||
1. In OEE line chart:
|
||||
- change Line type from monotone to linear
|
||||
- set connectNulls={false}
|
||||
2. Ensure frontend types allow nullable trend values for OEE points.
|
||||
3. If downsampling exists, preserve gap markers so null separators are not removed.
|
||||
- Keep null transition points when reducing point count.
|
||||
4. Ensure tooltip/value formatting handles nulls gracefully.
|
||||
|
||||
Verification and execution steps:
|
||||
|
||||
1) Run targeted checks first
|
||||
- run tests related to downtime guard if available:
|
||||
- npm run test:downtime-reason-guard
|
||||
- run lint at least for changed files (or full lint if practical):
|
||||
- npx eslint app/api/ingest/event/route.ts app/api/reports/route.ts "app/(app)/reports/ReportsCharts.tsx" "app/(app)/reports/ReportsPageClient.tsx" (quote the paths containing parentheses — unquoted "(" is a syntax error in POSIX shells)
|
||||
|
||||
2) SQL Verification Pack (PRE-BACKFILL)
|
||||
Execute these exactly and capture output snapshots:
|
||||
|
||||
A. Recent downtime reason quality mix
|
||||
SELECT
|
||||
reasonCode,
|
||||
COUNT(*) AS rows
|
||||
FROM "ReasonEntry"
|
||||
WHERE kind = 'downtime'
|
||||
AND "capturedAt" >= NOW() - INTERVAL '7 days'
|
||||
GROUP BY reasonCode
|
||||
ORDER BY rows DESC;
|
||||
|
||||
B. Episodes with conflicting reason codes
|
||||
SELECT
|
||||
"orgId",
|
||||
"machineId",
|
||||
"episodeId",
|
||||
COUNT(DISTINCT "reasonCode") AS distinct_codes,
|
||||
MIN("capturedAt") AS first_seen,
|
||||
MAX("capturedAt") AS last_seen
|
||||
FROM "ReasonEntry"
|
||||
WHERE kind = 'downtime'
|
||||
AND "episodeId" IS NOT NULL
|
||||
AND "capturedAt" >= NOW() - INTERVAL '14 days'
|
||||
GROUP BY "orgId", "machineId", "episodeId"
|
||||
HAVING COUNT(DISTINCT "reasonCode") > 1
|
||||
ORDER BY last_seen DESC
|
||||
LIMIT 200;
|
||||
|
||||
C. Potential manual overwritten by non-authoritative check
|
||||
SELECT
|
||||
re."orgId",
|
||||
re."machineId",
|
||||
re."episodeId",
|
||||
re."reasonCode",
|
||||
re."capturedAt",
|
||||
re.meta
|
||||
FROM "ReasonEntry" re
|
||||
WHERE re.kind = 'downtime'
|
||||
AND re."capturedAt" >= NOW() - INTERVAL '14 days'
|
||||
AND re."reasonCode" IN ('PENDIENTE', 'UNCLASSIFIED')
|
||||
ORDER BY re."capturedAt" DESC
|
||||
LIMIT 200;
|
||||
|
||||
D. Event continuity around downtime + ack
|
||||
SELECT
|
||||
"machineId",
|
||||
"eventType",
|
||||
ts,
|
||||
data->>'incidentKey' AS incident_key,
|
||||
data->>'status' AS status,
|
||||
data->>'is_update' AS is_update,
|
||||
data->>'is_auto_ack' AS is_auto_ack
|
||||
FROM "MachineEvent"
|
||||
WHERE ts >= NOW() - INTERVAL '3 days'
|
||||
AND "eventType" IN ('microstop', 'macrostop', 'downtime-acknowledged')
|
||||
ORDER BY ts DESC
|
||||
LIMIT 500;
|
||||
|
||||
E. KPI production vs non-production counts
|
||||
SELECT
|
||||
COALESCE("trackingEnabled", false) AS tracking_enabled,
|
||||
COALESCE("productionStarted", false) AS production_started,
|
||||
COUNT(*) AS rows
|
||||
FROM "MachineKpiSnapshot"
|
||||
WHERE ts >= NOW() - INTERVAL '7 days'
|
||||
GROUP BY 1,2
|
||||
ORDER BY rows DESC;
|
||||
|
||||
F. Sharp OEE jumps in production snapshots
|
||||
WITH k AS (
|
||||
SELECT
|
||||
"machineId",
|
||||
ts,
|
||||
oee,
|
||||
LAG(oee) OVER (PARTITION BY "machineId" ORDER BY ts) AS prev_oee
|
||||
FROM "MachineKpiSnapshot"
|
||||
WHERE ts >= NOW() - INTERVAL '7 days'
|
||||
AND "trackingEnabled" = true
|
||||
AND "productionStarted" = true
|
||||
AND oee IS NOT NULL
|
||||
)
|
||||
SELECT
|
||||
"machineId",
|
||||
ts,
|
||||
prev_oee,
|
||||
oee,
|
||||
ABS(oee - prev_oee) AS delta
|
||||
FROM k
|
||||
WHERE prev_oee IS NOT NULL
|
||||
AND ABS(oee - prev_oee) >= 25
|
||||
ORDER BY delta DESC, ts DESC
|
||||
LIMIT 200;
|
||||
|
||||
G. Trend point count comparison
|
||||
SELECT
|
||||
'all' AS series,
|
||||
COUNT(*) AS points
|
||||
FROM "MachineKpiSnapshot"
|
||||
WHERE ts >= NOW() - INTERVAL '24 hours'
|
||||
AND oee IS NOT NULL
|
||||
UNION ALL
|
||||
SELECT
|
||||
'production_only' AS series,
|
||||
COUNT(*) AS points
|
||||
FROM "MachineKpiSnapshot"
|
||||
WHERE ts >= NOW() - INTERVAL '24 hours'
|
||||
AND oee IS NOT NULL
|
||||
AND "trackingEnabled" = true
|
||||
AND "productionStarted" = true;
|
||||
|
||||
3) Backfill run plan (must follow this order)
|
||||
A. Dry-run first:
|
||||
node scripts/backfill-downtime-reasons.mjs --dry-run --since 30d
|
||||
|
||||
B. Review dry-run output:
|
||||
- candidates
|
||||
- sampleUpdates
|
||||
- incident distribution by machine
|
||||
- any suspicious replacements
|
||||
|
||||
C. Apply scoped first (single machine from dry-run sample):
|
||||
node scripts/backfill-downtime-reasons.mjs --since 30d --machine-id <machine_uuid>
|
||||
|
||||
4) SQL Verification Pack (POST-BACKFILL)
|
||||
- Re-run queries A, B, C at minimum.
|
||||
- Optionally rerun D/F/G for confidence.
|
||||
- Confirm reduction in stale PENDIENTE/UNCLASSIFIED rows where authoritative reason exists.
|
||||
- Confirm conflicting episode reason cases reduced or shifted as expected.
|
||||
|
||||
Acceptance criteria checklist:
|
||||
- New downtime episodes retain authoritative manual reason and do not regress to PENDIENTE/UNCLASSIFIED.
|
||||
- Fallback downtime continuity now keys by incidentKey whenever available; row.id only when absent.
|
||||
- OEE trend no longer shows implausible 0/100 jumps from non-production snapshots.
|
||||
- OEE chart is linear and visually shows true gaps (no smoothing continuity across filtered windows).
|
||||
- Backfill dry-run and scoped apply outputs are captured and reasonable.
|
||||
- Post-run SQL confirms expected improvements without obvious regressions.
|
||||
|
||||
Output format required from you:
|
||||
1. Files changed with concise reason per file.
|
||||
2. Exact diff summary for each modified file.
|
||||
3. Test/lint commands run + result.
|
||||
4. Pre-backfill SQL results (compact tables or summarized counts).
|
||||
5. Dry-run output summary (key fields + sample updates).
|
||||
6. Scoped apply command used and output summary.
|
||||
7. Post-backfill SQL delta summary (before vs after).
|
||||
8. Any blockers (env vars, DB auth, migration state, etc.) and exactly what is needed to unblock.
|
||||
204
lib/analytics/downtimeFilters.ts
Normal file
204
lib/analytics/downtimeFilters.ts
Normal file
@@ -0,0 +1,204 @@
|
||||
import { prisma } from "@/lib/prisma";
|
||||
import { normalizeShiftOverrides } from "@/lib/settings";
|
||||
|
||||
// Query-string filter vocabularies accepted by the downtime report filters.
type PlannedFilter = "all" | "planned" | "unplanned";
type ShiftFilter = "all" | "A" | "B" | "C";

// Shape of a shift row as it appears either on OrgShift rows
// (startTime/endTime) or inside schedule overrides (start/end);
// both spellings are accepted by parseTimeMinutes callers below.
type ShiftLike = {
  name: string;
  startTime?: string | null;
  endTime?: string | null;
  start?: string | null;
  end?: string | null;
  enabled?: boolean;
};

// Everything needed to map a timestamp to a shift alias for one org.
type ShiftContext = {
  timeZone: string;
  shifts: ShiftLike[];
  // Per-weekday ("sun".."sat") replacement shift lists, when configured.
  overrides: Record<string, ShiftLike[]> | undefined;
};

// Enabled shifts are addressed positionally: the first enabled shift with a
// valid time window is "A", the next "B", then "C" (see resolveShiftAlias).
const SHIFT_ALIAS: ShiftFilter[] = ["A", "B", "C"];
// Strict 24-hour "HH:MM" (00:00–23:59).
const TIME_RE = /^([01]\d|2[0-3]):([0-5]\d)$/;

// Maps Intl "en-US" short weekday names to the override-record keys.
const WEEKDAY_KEY_MAP: Record<string, string> = {
  Sun: "sun",
  Mon: "mon",
  Tue: "tue",
  Wed: "wed",
  Thu: "thu",
  Fri: "fri",
  Sat: "sat",
};

// Indexed by Date#getUTCDay() (0 = Sunday); UTC fallback for getLocalDayKey.
const WEEKDAY_KEYS = ["sun", "mon", "tue", "wed", "thu", "fri", "sat"] as const;
|
||||
|
||||
function asRecord(value: unknown) {
|
||||
return value && typeof value === "object" && !Array.isArray(value)
|
||||
? (value as Record<string, unknown>)
|
||||
: null;
|
||||
}
|
||||
|
||||
function parseTimeMinutes(value?: string | null) {
|
||||
if (!value || !TIME_RE.test(value)) return null;
|
||||
const [hh, mm] = value.split(":");
|
||||
return Number(hh) * 60 + Number(mm);
|
||||
}
|
||||
|
||||
function getLocalMinutes(ts: Date, timeZone: string) {
|
||||
try {
|
||||
const parts = new Intl.DateTimeFormat("en-US", {
|
||||
timeZone,
|
||||
hour: "2-digit",
|
||||
minute: "2-digit",
|
||||
hourCycle: "h23",
|
||||
}).formatToParts(ts);
|
||||
const hours = Number(parts.find((p) => p.type === "hour")?.value ?? "0");
|
||||
const minutes = Number(parts.find((p) => p.type === "minute")?.value ?? "0");
|
||||
return hours * 60 + minutes;
|
||||
} catch {
|
||||
return ts.getUTCHours() * 60 + ts.getUTCMinutes();
|
||||
}
|
||||
}
|
||||
|
||||
function getLocalDayKey(ts: Date, timeZone: string) {
|
||||
try {
|
||||
const weekday = new Intl.DateTimeFormat("en-US", {
|
||||
timeZone,
|
||||
weekday: "short",
|
||||
}).format(ts);
|
||||
return WEEKDAY_KEY_MAP[weekday] ?? WEEKDAY_KEYS[ts.getUTCDay()];
|
||||
} catch {
|
||||
return WEEKDAY_KEYS[ts.getUTCDay()];
|
||||
}
|
||||
}
|
||||
|
||||
function resolveShiftAlias(context: ShiftContext, ts: Date): ShiftFilter | null {
|
||||
const dayKey = getLocalDayKey(ts, context.timeZone);
|
||||
const dayOverrides = context.overrides?.[dayKey];
|
||||
const activeShifts = dayOverrides ?? context.shifts;
|
||||
if (!activeShifts.length) return null;
|
||||
|
||||
const nowMin = getLocalMinutes(ts, context.timeZone);
|
||||
let enabledOrdinal = 0;
|
||||
for (const shift of activeShifts) {
|
||||
if (shift.enabled === false) continue;
|
||||
const start = parseTimeMinutes(shift.startTime ?? shift.start ?? null);
|
||||
const end = parseTimeMinutes(shift.endTime ?? shift.end ?? null);
|
||||
if (start == null || end == null) continue;
|
||||
|
||||
const alias = SHIFT_ALIAS[enabledOrdinal] ?? null;
|
||||
enabledOrdinal += 1;
|
||||
if (!alias) continue;
|
||||
|
||||
if (start <= end) {
|
||||
if (nowMin >= start && nowMin < end) return alias;
|
||||
} else if (nowMin >= start || nowMin < end) {
|
||||
return alias;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function isMicrostopLike(row: {
|
||||
episodeId?: string | null;
|
||||
meta?: unknown;
|
||||
}) {
|
||||
const episodeId = String(row.episodeId ?? "").toLowerCase();
|
||||
if (episodeId.startsWith("microstop:")) return true;
|
||||
|
||||
const meta = asRecord(row.meta);
|
||||
const anomalyType = String(meta?.anomalyType ?? "").toLowerCase();
|
||||
if (anomalyType === "microstop") return true;
|
||||
|
||||
const eventType = String(meta?.eventType ?? "").toLowerCase();
|
||||
return eventType === "microstop";
|
||||
}
|
||||
|
||||
function normalizePlanned(raw: string | null): PlannedFilter {
|
||||
const v = String(raw ?? "").trim().toLowerCase();
|
||||
if (v === "planned") return "planned";
|
||||
if (v === "unplanned") return "unplanned";
|
||||
return "all";
|
||||
}
|
||||
|
||||
export function resolvePlannedFilter(raw: string | null, includeMoldChange: boolean): PlannedFilter {
|
||||
const normalized = normalizePlanned(raw);
|
||||
if (raw != null && String(raw).trim() !== "") return normalized;
|
||||
return includeMoldChange ? "all" : "unplanned";
|
||||
}
|
||||
|
||||
export function normalizeShiftFilter(raw: string | null): ShiftFilter {
|
||||
const v = String(raw ?? "").trim().toUpperCase();
|
||||
if (v === "A" || v === "B" || v === "C") return v;
|
||||
return "all";
|
||||
}
|
||||
|
||||
export function normalizeMicrostopLtMin(raw: string | null) {
|
||||
if (!raw) return null;
|
||||
const n = Number(raw);
|
||||
if (!Number.isFinite(n) || n <= 0) return null;
|
||||
return n;
|
||||
}
|
||||
|
||||
function passesPlannedFilter(reasonCode: string, planned: PlannedFilter) {
|
||||
if (planned === "planned") return reasonCode === "MOLD_CHANGE";
|
||||
if (planned === "unplanned") return reasonCode !== "MOLD_CHANGE";
|
||||
return true;
|
||||
}
|
||||
|
||||
export async function loadDowntimeShiftContext(orgId: string): Promise<ShiftContext> {
|
||||
const [shifts, settings] = await Promise.all([
|
||||
prisma.orgShift.findMany({
|
||||
where: { orgId },
|
||||
orderBy: { sortOrder: "asc" },
|
||||
select: { name: true, startTime: true, endTime: true, enabled: true },
|
||||
}),
|
||||
prisma.orgSettings.findUnique({
|
||||
where: { orgId },
|
||||
select: { timezone: true, shiftScheduleOverridesJson: true },
|
||||
}),
|
||||
]);
|
||||
|
||||
return {
|
||||
timeZone: settings?.timezone || "UTC",
|
||||
shifts,
|
||||
overrides: normalizeShiftOverrides(settings?.shiftScheduleOverridesJson),
|
||||
};
|
||||
}
|
||||
|
||||
export function applyDowntimeFilters<T extends {
|
||||
reasonCode: string;
|
||||
capturedAt: Date;
|
||||
durationSeconds?: number | null;
|
||||
episodeId?: string | null;
|
||||
meta?: unknown;
|
||||
}>(
|
||||
rows: T[],
|
||||
options: {
|
||||
planned: PlannedFilter;
|
||||
shift: ShiftFilter;
|
||||
microstopLtMin: number | null;
|
||||
shiftContext: ShiftContext | null;
|
||||
}
|
||||
) {
|
||||
return rows.filter((row) => {
|
||||
if (!passesPlannedFilter(row.reasonCode, options.planned)) return false;
|
||||
|
||||
if (options.shift !== "all") {
|
||||
if (!options.shiftContext) return false;
|
||||
const alias = resolveShiftAlias(options.shiftContext, row.capturedAt);
|
||||
if (alias !== options.shift) return false;
|
||||
}
|
||||
|
||||
if (options.microstopLtMin != null && isMicrostopLike(row)) {
|
||||
if (row.durationSeconds == null) return false;
|
||||
const durationMin = row.durationSeconds / 60;
|
||||
if (!(durationMin < options.microstopLtMin)) return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
});
|
||||
}
|
||||
Reference in New Issue
Block a user