diff --git a/WWW/Image1.PNG b/WWW/Image1.PNG
new file mode 100644
index 0000000..f172327
Binary files /dev/null and b/WWW/Image1.PNG differ
diff --git a/WWW/Image2.PNG b/WWW/Image2.PNG
new file mode 100644
index 0000000..d4837d8
Binary files /dev/null and b/WWW/Image2.PNG differ
diff --git a/WWW/Image3.PNG b/WWW/Image3.PNG
new file mode 100644
index 0000000..ca7a798
Binary files /dev/null and b/WWW/Image3.PNG differ
diff --git a/WWW/Image4.PNG b/WWW/Image4.PNG
new file mode 100644
index 0000000..e7e1d05
Binary files /dev/null and b/WWW/Image4.PNG differ
diff --git a/WWW/anon_assets/custom.css b/WWW/anon_assets/custom.css
new file mode 100644
index 0000000..b607588
--- /dev/null
+++ b/WWW/anon_assets/custom.css
@@ -0,0 +1,290 @@
+/* custom.css */
+
+/* -----------------------------------------------------------------------
+ 0) Base reset + scope
+ ----------------------------------------------------------------------- */
+.anon-root,
+.anon-root *,
+.anon-root *::before,
+.anon-root *::after {
+ box-sizing: border-box;
+}
+
+.anon-root {
+ width: 100%;
+ min-height: 85vh;
+ position: relative;
+ overflow: hidden; /* keep module self-contained */
+ font-family: Arial, sans-serif;
+ padding: 0 !important;
+ margin: 0 !important;
+}
+
+/* Allow children to shrink properly (important for scroll containers) */
+.anon-root .container-fluid,
+.anon-root .row,
+.anon-root [class*="col-"],
+.anon-root .tab-content,
+.anon-root .tab-pane,
+.anon-root .tab-pane.active {
+ min-height: 0 !important;
+ min-width: 0 !important;
+}
+
+/* -----------------------------------------------------------------------
+ 1) Dashboard container (landing removed)
+ ----------------------------------------------------------------------- */
+.anon-root .anon-dashboard {
+ min-height: 85vh;
+}
+
+/* tab content should not force the whole page to grow */
+.anon-root .anon-dashboard .tab-content {
+ min-height: 0 !important;
+}
+
+/* -----------------------------------------------------------------------
+ 2) Layout heights (make the anonymization dashboard row fixed-height)
+ ----------------------------------------------------------------------- */
+/* Targets the row inside the Dashboard tabPanel */
+.anon-root .anon-dashboard .tab-pane.active > .row {
+ height: calc(100vh - 160px); /* adjust if your top header spacing differs */
+ overflow: hidden !important;
+}
+
+/* LEFT PANEL: the primary vertical scroller */
+.anon-root [id$="left-panel"] {
+ height: 100%;
+ overflow-y: auto !important;
+ overflow-x: hidden !important;
+ padding-right: 8px;
+}
+
+/* left scrollbar styling */
+.anon-root [id$="left-panel"]::-webkit-scrollbar { width: 8px; }
+.anon-root [id$="left-panel"]::-webkit-scrollbar-thumb { background: #c9c9c9; border-radius: 4px; }
+.anon-root [id$="left-panel"]::-webkit-scrollbar-track { background: transparent; }
+
+/* RIGHT PANEL: fill height and split 50/50 */
+.anon-root [id$="right-panel"] {
+ height: 100%;
+ overflow: hidden !important;
+}
+
+/* Right containers stretch to full height */
+.anon-root .right-containers {
+ height: 100% !important;
+ min-height: 0 !important;
+ display: flex !important;
+ flex-direction: column !important;
+ gap: 12px !important;
+ overflow: hidden !important;
+}
+
+/* Each box gets equal share */
+.anon-root .right-box {
+ flex: 1 1 0 !important; /* equal split */
+ min-height: 0 !important;
+ display: flex !important;
+ flex-direction: column !important;
+
+ background: #ffffff;
+ border: 1px solid #e6e6e6;
+ border-radius: 8px;
+}
+
+/* Headers */
+.anon-root .right-header {
+ flex: 0 0 auto !important;
+ padding: 10px 12px;
+ border-bottom: 1px solid #eeeeee;
+ background: #ffffff;
+ z-index: 2;
+}
+
+.anon-root .right-header h3 {
+ margin: 0;
+ font-size: 18px;
+ font-weight: 700;
+}
+
+/* Body becomes internal scroll container */
+.anon-root .right-body {
+ flex: 1 1 auto !important;
+ min-height: 0 !important;
+ overflow-y: auto !important;
+ overflow-x: hidden !important;
+ padding: 10px 12px;
+}
+
+/* Preview table wrapper: allow scroll (both directions if needed) */
+.anon-root .table-wrap {
+ height: 100%;
+ min-height: 0 !important;
+ overflow: auto !important;
+}
+
+.anon-root table {
+ width: 100% !important;
+ border-collapse: collapse;
+}
+
+/* -----------------------------------------------------------------------
+ 3) Risk summary cards
+ ----------------------------------------------------------------------- */
+.anon-root .risk-summary-grid {
+ display: grid;
+ grid-template-columns: 1fr 1fr;
+ gap: 12px;
+ align-items: start;
+}
+
+.anon-root .summary-card {
+ border: 1px solid #e6e6e6;
+ border-radius: 8px;
+ padding: 10px 12px;
+ background: #fafafa;
+}
+
+/* -----------------------------------------------------------------------
+ 4) Gauges
+ ----------------------------------------------------------------------- */
+.anon-root .gauge .gauge-value,
+.anon-root .gauge .gauge-label {
+ display: none !important;
+}
+
+.anon-root #preview-gauges .gauge-box {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ text-align: center;
+}
+
+.anon-root #preview-gauges h4 {
+ margin: 0 0 6px 0;
+}
+
+/* -----------------------------------------------------------------------
+ 5) Copy buttons (module-scoped)
+ ----------------------------------------------------------------------- */
+.anon-root button[id^="copy_"] {
+ float: right;
+ font-size: 12px;
+ background-color: #e0e0e0;
+ border: none;
+ border-radius: 5px;
+ padding: 6px 10px;
+ box-shadow: 1px 1px 3px rgba(0,0,0,0.15);
+ cursor: pointer;
+ margin-bottom: 10px;
+ position: relative;
+ z-index: 10;
+}
+
+/* -----------------------------------------------------------------------
+ 6) Inline suppress icon button
+ ----------------------------------------------------------------------- */
+.anon-root .qid-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: 8px;
+ margin-bottom: 6px;
+}
+
+.anon-root .icon-button {
+ background: #dc3545;
+ color: #ffffff;
+ border: none;
+ border-radius: 6px;
+ padding: 0;
+ cursor: pointer;
+ height: 32px;
+ width: 32px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+}
+
+.anon-root .icon-button:hover { opacity: 0.9; }
+.anon-root .icon-button .fa { pointer-events: none; }
+
+/* -----------------------------------------------------------------------
+ 7) Leaflet map sizing
+ ----------------------------------------------------------------------- */
+.anon-root [id$="geo_map"] {
+ height: 420px;
+ border: 1px solid #e6e6e6;
+ border-radius: 8px;
+ overflow: hidden;
+}
+
+/* -----------------------------------------------------------------------
+ 8) Download note
+ ----------------------------------------------------------------------- */
+.anon-root .download-note {
+ font-size: 12px;
+ color: #444444;
+ background: #f7f7f7;
+ border: 1px dashed #dddddd;
+ border-radius: 8px;
+ padding: 8px;
+ margin-top: 8px;
+}
+
+/* -----------------------------------------------------------------------
+ 9) Dark mode (toggle should add/remove .dark-mode on .anon-root)
+ ----------------------------------------------------------------------- */
+.anon-root.dark-mode {
+ background-color: #1e1e1e !important;
+ color: #ffffff;
+}
+
+.anon-root.dark-mode .right-box {
+ background: #2a2a2a;
+ border-color: #444444;
+}
+
+.anon-root.dark-mode .right-header {
+ background: #2a2a2a;
+ border-bottom-color: #444444;
+}
+
+.anon-root.dark-mode .summary-card {
+ background: #2e2e2e;
+ border-color: #444444;
+}
+
+.anon-root.dark-mode button[id^="copy_"] {
+ background-color: #444444;
+ color: #ffffff;
+}
+
+/* -----------------------------------------------------------------------
+ 10) Responsive adjustments
+ ----------------------------------------------------------------------- */
+@media (max-width: 992px) {
+ /* On smaller screens, let the page flow naturally */
+ .anon-root .anon-dashboard .tab-pane.active > .row {
+ height: auto;
+ overflow: visible !important;
+ }
+
+ .anon-root [id$="left-panel"] {
+ height: auto;
+ max-height: none;
+ }
+
+ .anon-root [id$="right-panel"] {
+ height: auto;
+ }
+
+ .anon-root .right-containers {
+ height: auto !important;
+ }
+
+ .anon-root .risk-summary-grid {
+ grid-template-columns: 1fr;
+ }
+}
diff --git a/WWW/clipboard.min.js b/WWW/clipboard.min.js
new file mode 100644
index 0000000..513aa38
--- /dev/null
+++ b/WWW/clipboard.min.js
@@ -0,0 +1,7 @@
+/*!
+ * clipboard.js v2.0.11
+ * https://clipboardjs.com/
+ *
+ * Licensed MIT © Zeno Rocha
+ */
+!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return function(n){var o={};function r(t){if(o[t])return o[t].exports;var e=o[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,r),e.l=!0,e.exports}return r.m=n,r.c=o,r.d=function(t,e,n){r.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:n})},r.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"=
diff --git a/WWW/styles.css b/WWW/styles.css
index 72210d0..87b8bec 100644
--- a/WWW/styles.css
+++ b/WWW/styles.css
@@ -1,146 +1,362 @@
-/* Media query for tablets */
-@media (min-width: 577px) and (max-width: 992px) {
- .roundbuttons {
- width: 50%; /* Adjust the width for tablets */
- }
+/* ==========================================================================
+ APHRC — FINAL PERFECT CSS
+ - Outer sidebar transparent (no extra layer)
+ - Only inner pill visible
+ - Pill is dark blue/gray (AdminLTE-like)
+ - No dark ring around dashboard
+ ========================================================================== */
+
+/* =========================
+ 0) Palette / globals
+ ========================= */
+:root{
+ --aphrc-green: #7BC148;
+ --aphrc-cyan: #00BFC4;
+ --aphrc-text: #333333;
+ --aphrc-bg: #FFFFFF;
+
+ /* Layout constants */
+ --aphrc-topbar-height: 110px; /* adjust if needed */
+ --aphrc-sidebar-collapsed: 56px;
+ --aphrc-sidebar-expanded: 240px;
+ --aphrc-transition-ms: 320ms;
+
+ /* Pill styling */
+ --aphrc-pill-color: #1f2d33; /* dark blue/gray */
+ --aphrc-pill-radius: 18px;
+ --aphrc-pill-pad-y: 10px;
+ --aphrc-pill-pad-x: 8px;
}
+html, body{ height: 100%; }
-input[type="radio"] {
- width: 12px;
- height: 12px;
- accent-color: #17a2b8;
- border: 1px solid #7bc148; /* Green border */
- cursor: pointer;
+body{
+ color: var(--aphrc-text);
+ background: var(--aphrc-bg);
+ overflow-x: hidden;
}
+/* =========================
+ 1) Remove AdminLTE dark canvas/ring
+ ========================= */
+html,
+body,
+.wrapper,
+.content-wrapper,
+.right-side,
+.main-footer{
+ background-color: var(--aphrc-bg) !important;
+}
-.box.box-success > .box-header {
- background-color: #bde0a3 !important; /* lighter green header */
- }
+.wrapper,
+.content-wrapper,
+.right-side{
+ box-shadow: none !important;
+ border: 0 !important;
+}
+.content-wrapper{
+ border-left: 0 !important;
+}
-.footer {
- background-color: #7bc148;
- color: white;
- padding: 0px;
- text-align: center;
- }
-
- .socialform {
- text-align: center;
- padding: 10px;
- }
- .footer a {
- margin: 0px;
- color: white;
- }
- .footer a:hover {
- color: #337ab7;
- }
-
- hr {
- width: 100%; /* Adjust the percentage to control the length */
- }
-
-
- .header {
- background: white;
- color: #7bc148;
- padding: 10px;
- display: flex;
- align-items: center;
- text-align:center;
- }
-
- hr {
- width: 100%; /* Adjust the percentage to control the length */
- }
-
- .logo {
- flex-shrink: 0;
- margin-right: 20px;
- max-height: 80%;
- max-width: 80%;
- }
-
- .header-text h1 {
- font-size: 30px;
- font-weight: bold;
- text-align:center;
- }
-
- .header-text h3 {
- font-size: 20px;
-
- }
-
-
- /* navbar (rest of the header) */
- .skin-blue .main-header .navbar {
- background-color:green;
+/* =========================
+ 2) Remove AdminLTE header bar (you use .custom-header)
+ ========================= */
+.main-header,
+.main-header .navbar,
+.skin-blue .main-header .navbar,
+.skin-green .main-header .navbar{
+ display: none !important;
+ height: 0 !important;
+ min-height: 0 !important;
+ border: 0 !important;
+ margin: 0 !important;
+ padding: 0 !important;
+}
- }
+/* Make room for your custom header */
+body .wrapper{
+ padding-top: var(--aphrc-topbar-height) !important;
+}
- /* main sidebar */
- .skin-blue .main-sidebar {
- background-color: #7BC148;
+.content-wrapper, .right-side, .content{
+ padding-top: 0 !important;
+ margin-top: 0 !important;
+}
- }
- /* active selected tab in the sidebarmenu */
- .skin-blue .main-sidebar .sidebar .sidebar-menu .active a{
- background-color:#7BC148;
- }
+/* =========================
+ 3) Your custom header (.custom-header)
+ ========================= */
+.custom-header{
+ position: fixed !important;
+ top: 0 !important;
+ left: 0 !important;
+ right: 0 !important;
+ z-index: 3000 !important;
+ background-color: var(--aphrc-green) !important;
+ color: #fff !important;
+ padding: 10px !important;
+ margin: 0 !important;
+ border: 0 !important;
+ box-shadow: none !important;
+}
- /* other links in the sidebarmenu */
- .skin-blue .main-sidebar .sidebar .sidebar-menu a{
- background-color: #7BC148;
- border-color: #7BC148;
- color: #ffffff;
- }
+/* Keep your older .header (white) if used elsewhere (login etc.) */
+.header{
+ background: #fff;
+ color: var(--aphrc-green);
+ padding: 10px;
+ display: flex;
+ align-items: center;
+ text-align: center;
+}
- /* other links in the sidebarmenu when hovered */
- .skin-blue .main-sidebar .sidebar .sidebar-menu a:hover{
- background-color: #2c3b41;
- }
- /* toggle button when hovered */
- .skin-blue .main-header .navbar .sidebar-toggle:hover{
- background-color: #D37D28;
- }
- /* toggle button when hovered */
- .skin-blue .main-header .navbar .sidebar-toggle{
- margin:1em;
- }
+.logo{
+ flex-shrink: 0;
+ margin-right: 20px;
+ max-height: 80%;
+ max-width: 80%;
+}
- /* body */
- .content-wrapper, .right-side {
- background-color: #ffffff;
- padding-left: 1.5em;
+.header-text h1{
+ font-size: 30px;
+ font-weight: bold;
+ text-align: center;
+}
+.header-text h3{
+ font-size: 20px;
+}
+
+/* =========================
+ 4) Sidebar: transparent shell + dark pill only
+ ========================= */
+
+/* Sidebar shell (outer area) is TRANSPARENT and non-blocking */
+.main-sidebar,
+.left-side,
+.skin-green .main-sidebar,
+.skin-blue .main-sidebar,
+.main-sidebar .sidebar{
+ background: transparent !important;
+ background-color: transparent !important;
+ border: 0 !important;
+ box-shadow: none !important;
+}
+
+/* Fixed positioning and hover-expand */
+.main-sidebar{
+ position: fixed !important;
+ left: 0 !important;
+ top: var(--aphrc-topbar-height) !important;
+ height: calc(100vh - var(--aphrc-topbar-height)) !important;
+
+ width: var(--aphrc-sidebar-collapsed) !important;
+
+ overflow: visible !important; /* important: pill can have shadow */
+ z-index: 2000 !important;
+
+ transition: width var(--aphrc-transition-ms) cubic-bezier(0.22, 1, 0.36, 1) !important;
+ will-change: width;
+
+ pointer-events: none !important; /* outer shell won’t block footer/clicks */
+}
+
+.main-sidebar:hover{
+ width: var(--aphrc-sidebar-expanded) !important;
+}
+
+/* Content aligned to collapsed sidebar */
+.content-wrapper, .right-side, .main-footer{
+ margin-left: var(--aphrc-sidebar-collapsed) !important;
+ transition: margin-left var(--aphrc-transition-ms) cubic-bezier(0.22, 1, 0.36, 1) !important;
+}
+
+.main-sidebar:hover ~ .content-wrapper,
+.main-sidebar:hover ~ .right-side,
+.main-sidebar:hover ~ .main-footer{
+ margin-left: var(--aphrc-sidebar-expanded) !important;
+}
+
+/* Sidebar inner container */
+.main-sidebar .sidebar{
+ padding-top: 8px !important;
+ margin-top: 0 !important;
+ overflow: visible !important;
+}
+
+/* Make ONLY the pill interactive */
+.main-sidebar .sidebar-menu{
+ pointer-events: auto !important;
+}
+
+/* Stop old rules that paint everything green */
+.skin-green .main-sidebar .sidebar .sidebar-menu a,
+.skin-blue .main-sidebar .sidebar .sidebar-menu a{
+ background: transparent !important;
+ background-color: transparent !important;
+ border-color: transparent !important;
+}
+
+/* The DARK PILL panel (height = content) */
+.main-sidebar .sidebar-menu{
+ background: var(--aphrc-pill-color) !important;
+ background-color: var(--aphrc-pill-color) !important;
+
+ margin: 12px var(--aphrc-pill-pad-x) !important;
+ padding: var(--aphrc-pill-pad-y) 0 !important;
+
+ border-radius: var(--aphrc-pill-radius) !important;
+ box-shadow: 0 10px 24px rgba(0,0,0,0.18) !important;
+
+ overflow: hidden !important;
+ list-style: none !important;
+}
+
+/* Menu items */
+.main-sidebar .sidebar-menu > li{ margin: 0 !important; }
+
+.main-sidebar .sidebar-menu > li > a{
+ height: 44px !important;
+ display: flex !important;
+ align-items: center !important;
+ box-sizing: border-box !important;
+ line-height: 44px !important;
+
+ /* collapsed: icons centered */
+ justify-content: center !important;
+ padding: 0 !important;
+
+ color: #ffffff !important;
+ border-radius: 12px !important;
+}
+
+/* Icons */
+.main-sidebar .sidebar-menu > li > a > i{
+ font-size: 16px !important;
+ margin: 0 !important;
+ width: auto !important;
+}
+
+/* Hover/active (dark theme) */
+.main-sidebar .sidebar-menu > li > a:hover{
+ background: rgba(255,255,255,0.10) !important;
+}
+.main-sidebar .sidebar-menu > li.active > a,
+.main-sidebar .sidebar-menu > li > a:focus{
+ background: rgba(255,255,255,0.16) !important;
+}
+
+/* Collapsed: hide labels */
+.main-sidebar .sidebar-menu > li > a > span,
+.main-sidebar .sidebar-menu > li > a > .pull-right-container{
+ display: none !important;
+}
+
+/* Expanded hover: show labels and align left */
+.main-sidebar:hover .sidebar-menu > li > a{
+ justify-content: flex-start !important;
+ padding: 0 15px !important;
+}
+.main-sidebar:hover .sidebar-menu > li > a > i{
+ margin-right: 10px !important;
+}
+.main-sidebar:hover .sidebar-menu > li > a > span,
+.main-sidebar:hover .sidebar-menu > li > a > .pull-right-container{
+ display: inline-block !important;
+}
+
+/* Small screens: tighten pill */
+@media (max-width: 576px){
+ .main-sidebar .sidebar-menu{
+ border-radius: 16px !important;
+ margin: 10px 6px !important;
}
-
-
- /*Tables*/
-
-.dataTables_wrapper , .dataTables_wrapper .dataTables_wrapper .dataTables_info, .dataTables_wrapper .dataTables_processing, .dataTables_wrapper{
- color:#000000;
- background-color:#ffffff;
- }
-
- thead {
- color: #ffffff;
- font-size:1.2em;
- background-color:#17a2b8;
- }
-
- tbody {
- color: #000000;
- font-size:1.2em;
- }
-
+}
+
+/* =========================
+ 5) Content area styling
+ ========================= */
+.content-wrapper, .right-side{
+ background-color: #ffffff !important;
+ padding-left: 1.5em !important;
+}
+
+/* =========================
+ 6) Tables / DataTables
+ ========================= */
+.dataTables_wrapper,
+.dataTables_wrapper .dataTables_info,
+.dataTables_wrapper .dataTables_processing{
+ color:#000000 !important;
+ background-color:#ffffff !important;
+}
+
+thead{
+ color: #ffffff !important;
+ font-size:1.2em !important;
+ background-color:#17a2b8 !important;
+}
+tbody{
+ color: #000000 !important;
+ font-size:1.2em !important;
+}
+
+table{
+ background-color: white !important;
+ color: black !important;
+ border: 1px solid #ddd !important;
+}
+th{
+ background-color: #17a2b8 !important;
+ color: white !important;
+}
+td{
+ background-color: #ffffff !important;
+ color: black !important;
+}
+
+/* =========================
+ 7) Forms / inputs
+ ========================= */
+input[type="radio"]{
+ width: 12px;
+ height: 12px;
+ accent-color: #17a2b8;
+ border: 1px solid var(--aphrc-green);
+ cursor: pointer;
+}
+
+.shiny-input-checkbox input[type='checkbox']{
+ background-color: #4CAF50;
+}
+
+.bootstrap-switch{
+ background-color: #4CAF50 !important;
+}
+
+@media (min-width: 577px) and (max-width: 992px){
+ .roundbuttons{ width: 50%; }
+}
+
+#primaryTabs{
+ color: var(--aphrc-green);
+ width: 100%;
+}
+
+/* =========================
+ 8) Upload / overview panels and cards
+ ========================= */
#upload_form{
padding-left: 1.2em;
justify-content: left;
align-items: left;
+
+ background: var(--aphrc-bg);
+ border: 2px solid var(--aphrc-green);
+ border-radius: 14px;
+ box-shadow: 0 10px 24px rgba(0,0,0,0.10);
+ padding: 14px 16px;
+ margin: 16px auto;
}
#OverViewMenu{
@@ -148,166 +364,161 @@ input[type="radio"] {
overflow-y: auto;
}
-table {
- background-color: white;
- color: black;
- border: 1px solid #ddd;
+.aphrc-row{
+ background: var(--aphrc-bg);
+ border: 2px solid var(--aphrc-green);
+ border-radius: 14px;
+ box-shadow: 0 10px 24px rgba(0,0,0,0.10);
+ padding: 14px 16px;
+ margin: 16px auto;
}
-th {
- background-color: #17a2b8;
- color: white;
+.aphrc-row:hover,
+.upload_form:hover,
+#aphrc-row1:hover{
+ box-shadow: 0 14px 28px rgba(0,0,0,0.12);
+ transform: translateY(-1px);
+ transition: all .18s ease-in-out;
}
-td {
- background-color: #ffffff;
- color: black;
+/* =========================
+ 9) Boxes
+ ========================= */
+.box.box-success > .box-header{
+ background-color: #bde0a3 !important;
}
-.shiny-input-checkbox input[type='checkbox'] {
- background-color: #4CAF50;
- }
-
-
- .bootstrap-switch{
- background-color: #4CAF50 !important;
+/* =========================
+ 10) Footer (your custom .footer)
+ ========================= */
+.footer{
+ background-color: var(--aphrc-green);
+ color: white;
+ padding: 0px;
+ text-align: center;
}
-#primaryTabs {
- color: #7bc148;
- width: 100%;
+.socialform{
+ text-align: center;
+ padding: 10px;
+}
+.footer a{
+ margin: 0px;
+ color: white;
+}
+.footer a:hover{
+ color: #337ab7;
}
+/* =========================
+ 11) HR
+ ========================= */
+hr{ width: 100%; }
-.auth-container {
+/* =========================
+ 12) Auth / login page UI
+ ========================= */
+.auth-container{
max-width: 400px;
margin: 6% auto;
padding: 30px 25px;
- border-color:#7bc148;
+ border-color: var(--aphrc-green);
border-radius: 15px;
- border-width:2px;
+ border-width: 2px;
background-color: #fff;
box-shadow: 0 0 25px rgba(0, 0, 0, 0.1);
}
-.auth-title {
+.auth-title{
font-weight: 600;
font-size: 24px;
margin-bottom: 20px;
}
-.toggle-buttons {
+.toggle-buttons{
display: flex;
justify-content: center;
margin-bottom: 20px;
}
-.toggle-buttons button {
+.toggle-buttons button{
margin: 0 5px;
- background-color: #7bc148;
+ background-color: var(--aphrc-green);
}
-#logoutID {
+#logoutID{
background-color: white;
color: gray;
font-size: 18px;
border: 2px solid gray;
cursor: pointer;
- /* Center icon */
display: flex;
justify-content: center;
align-items: center;
}
-
-#logoutBtn:hover {
- background-color: #c82333;
- }
-
-#loading_screen {
- position: fixed;
- top: 0; left: 0;
- width: 100%; height: 100%;
- background-color: #ffffff;
- z-index: 9999;
- display: flex;
- justify-content: center;
- align-items: center;
- font-size: 40px;
- color: #7bc148;
- }
-
-
-#loader svg circle {
- stroke-width: 20px !important; /* increase this for a thicker ring */
- }
-
- /* APHRC palette */
-:root{
- --aphrc-green: #7BC148;
- --aphrc-cyan: #00BFC4;
- --aphrc-text: #333333;
- --aphrc-bg: #FFFFFF;
-}
-/* Row container with shadow + border */
-.aphrc-row {
- background: var(--aphrc-bg);
- border: 2px solid var(--aphrc-green);
- border-radius: 14px;
- box-shadow: 0 10px 24px rgba(0,0,0,0.10);
- padding: 14px 16px; /* space around row contents */
- margin: 16px auto; /* vertical rhythm */
-}
-/* Optional subtle hover lift */
-.aphrc-row:hover{
- box-shadow: 0 14px 28px rgba(0,0,0,0.12);
- transform: translateY(-1px);
- transition: all .18s ease-in-out;
+#logoutBtn:hover{
+ background-color: #c82333;
}
-.aphrc-row:hover{
- box-shadow: 0 14px 28px rgba(0,0,0,0.12);
- transform: translateY(-1px);
- transition: all .18s ease-in-out;
+/* =========================
+ 13) Loading screen / loader
+ ========================= */
+#loading_screen{
+ position: fixed;
+ top: 0; left: 0;
+ width: 100%; height: 100%;
+ background-color: #ffffff;
+ z-index: 9999;
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ font-size: 40px;
+ color: var(--aphrc-green);
}
-
-
-#upload_form{
- padding-left: 1.2em;
- justify-content: left;
- align-items: left;
- background: var(--aphrc-bg);
- border: 2px solid var(--aphrc-green);
- border-radius: 14px;
- box-shadow: 0 10px 24px rgba(0,0,0,0.10);
- padding: 14px 16px; /* space around row contents */
- margin: 16px auto; /* vertical rhythm */
+#loader svg circle{
+ stroke-width: 20px !important;
}
-.upload_form:hover{
- box-shadow: 0 14px 28px rgba(0,0,0,0.12);
- transform: translateY(-1px);
- transition: all .18s ease-in-out;
+/* =========================
+ 14) Progress bar
+ ========================= */
+.top-progress .progress-bar{
+ background-color: var(--aphrc-green) !important;
}
-
-
-#aphrc-row1:hover{
- box-shadow: 0 14px 28px rgba(0,0,0,0.12);
- transform: translateY(-1px);
- transition: all .18s ease-in-out;
+/* =========================
+ 15) Anonymization module isolation
+ ========================= */
+.anon-root table,
+.anon-root th,
+.anon-root td,
+.anon-root thead,
+.anon-root tbody{
+ all: unset;
}
-#aphrc-row1:hover{
- box-shadow: 0 14px 28px rgba(0,0,0,0.12);
- transform: translateY(-1px);
- transition: all .18s ease-in-out;
+.anon-root table{
+ width: 100% !important;
+ border-collapse: collapse !important;
+ display: table;
}
-.top-progress .progress-bar {
- background-color: #7bc148 !important;
- }
+.anon-root thead{ display: table-header-group; }
+.anon-root tbody{ display: table-row-group; }
+.anon-root tr{ display: table-row; }
+.anon-root th,
+.anon-root td{
+ display: table-cell;
+ padding: 6px 10px;
+ border: 1px solid #e6e6e6;
+ vertical-align: top;
+}
+.anon-root{
+ padding: 0 !important;
+ margin: 0 !important;
+}
diff --git a/modules/anonymization_functions.R b/modules/anonymization_functions.R
new file mode 100644
index 0000000..acc227b
--- /dev/null
+++ b/modules/anonymization_functions.R
@@ -0,0 +1,129 @@
+# anonymization_functions.R
+
+# Load required packages
+library(dplyr)
+library(sdcMicro)
+library(digest)
+library(uuid)
+
+# --- MASKING ---
+# Obscure free-text identifiers by overwriting every character with "*".
+# Non-character columns listed in `cols` pass through unchanged.
+apply_masking <- function(df, cols){
+  mask_one <- function(v) {
+    # "." matches every character, so each one is replaced by "*".
+    if (is.character(v)) stringr::str_replace_all(v, ".", "*") else v
+  }
+  mutate(df, across(all_of(cols), mask_one))
+}
+attr(apply_masking, "description") <- "Replace each character in specified columns with '*'"
+
+# --- SUPPRESSION ---
+# Drop the named columns entirely; names absent from `df` are silently ignored
+# (any_of does not error on missing columns).
+apply_suppression <- function(df, cols) {
+  select(df, -any_of(cols))
+}
+attr(apply_suppression, "description") <- "Drop the specified columns"
+
+# --- BUCKETING ---
+# Generalize numeric columns into fixed-width bins starting at 0.
+#
+# df       data frame to transform
+# cols     character vector of column names to bucket
+# bin_size positive bin width; labels look like "0-9", "10-19", ...
+#
+# As before, values below 0 fall outside the break range and become NA.
+# Columns that are non-numeric or entirely NA are returned unchanged instead
+# of erroring (max() on an all-NA column yields -Inf, which made seq() fail).
+apply_bucketing <- function(df, cols, bin_size) {
+  stopifnot(is.numeric(bin_size), length(bin_size) == 1, bin_size > 0)
+  df %>%
+    mutate(across(
+      all_of(cols),
+      ~ {
+        if (!is.numeric(.x) || all(is.na(.x))) {
+          .x  # nothing safe to bucket — leave the column as-is
+        } else {
+          max_val <- max(.x, na.rm = TRUE)
+          breaks <- seq(0, max_val + bin_size, by = bin_size)
+          labels <- paste0(head(breaks, -1), "-", breaks[-1] - 1)
+          cut(.x, breaks = breaks, labels = labels, include.lowest = TRUE, right = FALSE)
+        }
+      }
+    ))
+}
+attr(apply_bucketing, "description") <- "Bucket numeric columns into fixed-width intervals starting at 0"
+
+# --- PSEUDONYMIZATION ---
+# Replace each value with the SHA-256 digest of its character representation.
+#
+# Bug fix: digest() is NOT vectorized — the previous code hashed the whole
+# column vector at once, so every row in a column received the same single
+# digest and all per-row information was destroyed. Hash element-wise
+# instead: identical inputs still map to identical digests (so linkage on
+# pseudonyms is preserved), and NA values stay NA rather than being hashed.
+apply_pseudonymization <- function(df, cols) {
+  hash_one <- function(v) {
+    if (is.na(v)) NA_character_ else digest(v, algo = "sha256")
+  }
+  df %>%
+    mutate(across(
+      all_of(cols),
+      ~ vapply(as.character(.x), hash_one, character(1), USE.NAMES = FALSE)
+    ))
+}
+attr(apply_pseudonymization, "description") <- "Replace values with SHA-256 hash digests"
+
+# --- TOKENIZATION ---
+# Replace each value with a random 10-character alphanumeric token.
+#
+# df    data frame to transform
+# cols  columns whose values are replaced wholesale with tokens
+# seed  RNG seed so the token assignment is reproducible across runs
+#
+# NOTE(review): tokens are drawn independently per row, so two rows holding
+# the same original value receive *different* tokens — joins/linkage on a
+# tokenized column are impossible. Confirm this is intended (classic
+# tokenization maps each distinct value to one stable token).
+# NOTE(review): set.seed() mutates the session-global RNG state as a side
+# effect; callers relying on their own RNG stream should be aware.
+apply_tokenization <- function(df, cols, seed = 123) {
+  set.seed(seed)
+  df %>%
+    mutate(across(
+      all_of(cols),
+      ~ replicate(n(), paste0(sample(c(0:9, letters, LETTERS), 10, TRUE), collapse = ""))
+    ))
+}
+attr(apply_tokenization, "description") <- "Replace values with random 10-character tokens"
+
+# --- BASIC K-ANONYMITY ---
+# Keep only rows whose quasi-identifier combination occurs at least k times.
+# Row order and the column set are preserved; rows in groups smaller than k
+# are removed.
+apply_k_anonymity <- function(df, cols, k) {
+  df %>%
+    group_by(across(all_of(cols))) %>%
+    filter(n() >= k) %>%
+    ungroup()
+}
+attr(apply_k_anonymity, "description") <- "Filter to only those groups whose size >= k"
+
+# --- EXTENDED K-ANONYMITY ---
+# Suppress direct IDs, generalize numeric QIDs, then enforce k-anonymity via sdcMicro
+#
+# df          input data frame
+# qids        quasi-identifier column names passed to sdcMicro as keyVars
+# k           minimum group size enforced by kAnon()
+# bucket_cols named list: column name -> generalization function applied to that column
+# direct_ids  columns removed outright before any other processing
+apply_k_extended <- function(df, qids, k, bucket_cols = list(), direct_ids = character()) {
+  # 1) Suppress direct identifiers
+  df_proc <- df %>% select(-any_of(direct_ids))
+  # Temporary row key used in step 4 to tell which rows survived kAnon()
+  df_proc$row_id_temp <- seq_len(nrow(df_proc))
+
+  # 2) Generalize numeric QIDs (each bucket_cols entry is a function of the column)
+  for (col in names(bucket_cols)) {
+    df_proc[[col]] <- bucket_cols[[col]](df_proc[[col]])
+  }
+
+  # 3) sdcMicro k-anonymity — only QIDs still present after suppression are used
+  qids2 <- intersect(qids, names(df_proc))
+  sdcObj <- createSdcObj(dat = df_proc, keyVars = qids2)
+  sdcObj <- kAnon(sdcObj, k = k)
+  df_k <- extractManipData(sdcObj)
+
+  # 4) Combine matched & unmatched
+  # NOTE(review): kAnon() normally *suppresses key values* rather than dropping
+  # rows, in which case df_k already contains every row_id_temp and `unmatched`
+  # is empty — confirm the intended semantics of re-appending unmatched rows.
+  matched <- df_k %>% select(-row_id_temp)
+  unmatched <- df_proc %>% filter(!row_id_temp %in% df_k$row_id_temp) %>% select(-row_id_temp)
+  bind_rows(matched, unmatched)
+}
+attr(apply_k_extended, "description") <- "Extended k-anonymity with bucketing & suppression"
+
+# --- L-DIVERSITY ---
+# Retain only those QID groups whose sensitive attribute takes at least
+# l distinct values; all other rows are dropped.
+apply_l_diversity <- function(df, qids, sensitive_attr, l) {
+  grouped <- group_by(df, across(all_of(qids)))
+  diverse <- filter(grouped, n_distinct(.data[[sensitive_attr]]) >= l)
+  ungroup(diverse)
+}
+attr(apply_l_diversity, "description") <- "Filter groups to ensure >= l diversity in the sensitive attribute"
+
+# --- T-CLOSENESS ---
+# Keep only QID groups whose sensitive-attribute distribution is within
+# total-variation distance t of the global distribution.
+#
+# Bug fix: the previous code computed `global_dist$prop - local$prop`
+# directly, but a group usually lacks some categories, so the two vectors
+# had different lengths/orderings and the distance was taken between
+# misaligned (recycled) categories. Align both distributions on the full
+# global category set first; categories absent from a group contribute
+# proportion 0. NA is kept as its own category, matching count()'s old
+# behavior. (Also avoids the deprecated dplyr::cur_data().)
+apply_t_closeness <- function(df, qids, sensitive_attr, t) {
+  # Global category set and proportions, computed once up front.
+  levs <- unique(df[[sensitive_attr]])
+  as_dist <- function(x) {
+    prop.table(table(factor(x, levels = levs, exclude = NULL)))
+  }
+  global_dist <- as_dist(df[[sensitive_attr]])
+
+  df %>%
+    group_by(across(all_of(qids))) %>%
+    filter({
+      local_dist <- as_dist(.data[[sensitive_attr]])
+      # total variation distance between category-aligned distributions
+      sum(abs(as.numeric(global_dist) - as.numeric(local_dist))) / 2 <= t
+    }) %>%
+    ungroup()
+}
+attr(apply_t_closeness, "description") <- "Filter groups whose sensitive-attribute distribution is within t of the global distribution"
diff --git a/modules/mod_quant_anonymization.R b/modules/mod_quant_anonymization.R
new file mode 100644
index 0000000..f119dd1
--- /dev/null
+++ b/modules/mod_quant_anonymization.R
@@ -0,0 +1,362 @@
+# modules/mod_quant_anonymization.R
+
+# IMPORTANT: keep these global so anon_quant_server_logic() is visible
+source(.anon_server_mod, local = FALSE)
+
+# ---- Quantitative anonymization module ----
+mod_quant_anon_ui <- function(id) {
+ ns <- shiny::NS(id)
+
+ # Precompute namespaced IDs used in JS strings
+ ns_right_panel <- ns("right-panel")
+ ns_main_tabs <- ns("main_tabs")
+ ns_remove_ids <- ns("remove_ids") # must exist in identifier_selector output
+ ns_remove_inline <- ns("remove_ids_inline")
+ ns_method <- ns("method")
+
+ # ACE IDs
+ ns_r_ace <- ns("r_code_ace")
+ ns_stata_ace <- ns("stata_code_ace")
+ ns_py_ace <- ns("python_code_ace")
+ ns_copy_r <- ns("copy_r")
+ ns_copy_st <- ns("copy_stata")
+ ns_copy_py <- ns("copy_py")
+
+ # Single main section
+ ns_dashboard <- ns("dashboard")
+
+ shiny::div(
+ id = ns("anon_root"),
+ class = "anon-root",
+ shiny::tagList(
+ shinyjs::useShinyjs(),
+
+ shiny::tags$head(
+ # Font Awesome (icons)
+ shiny::tags$link(
+ rel = "stylesheet",
+ href = "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.0/css/all.min.css"
+ ),
+
+ # Copy helper JS (works with namespaced ACE ids)
+ shiny::tags$script(shiny::HTML("
+ function copyAce(editorId, btnId) {
+ var ed = ace.edit(editorId);
+ if(!ed) return;
+ var code = ed.getValue();
+ var ta = document.createElement('textarea');
+ ta.value = code;
+ document.body.appendChild(ta);
+ ta.select();
+ document.execCommand('copy');
+ document.body.removeChild(ta);
+ var btn = document.getElementById(btnId);
+ if(btn){
+ btn.innerText = 'Copied!';
+ setTimeout(function(){ btn.innerText = '📋 Copy'; }, 2000);
+ }
+ }
+ "))
+ ),
+
+ # ==== MAIN DASHBOARD (visible immediately) ====
+ shiny::div(
+ id = ns_dashboard,
+ class = "anon-dashboard",
+ style = "display:block; padding:20px;",
+
+ shiny::tabsetPanel(
+ id = ns_main_tabs,
+
+ # -------------------------- DASHBOARD TAB -------------------------------
+ shiny::tabPanel(
+ "Dashboard",
+ shiny::fluidRow(
+
+ # ---------- LEFT PANEL ----------
+ shiny::column(
+ width = 4, id = ns("left-panel"),
+
+ # Step 0 (platform dataset picker + optional file upload fallback)
+ shiny::wellPanel(
+ shiny::tags$h4("Step 0: Load Data"),
+
+ shiny::div(
+ style = "padding:8px; border:1px solid #eee; border-radius:6px;",
+ shiny::tags$strong("Use already uploaded and selected data"),
+ shiny::br(),
+ shiny::uiOutput(ns("platform_dataset_picker")),
+ shiny::actionButton(
+ ns("use_platform_data"),
+ "Load selected dataset",
+ class = "btn btn-success btn-block"
+ ),
+ shiny::tags$small(
+ style = "display:block; margin-top:6px; color:#666;",
+ ""
+ )
+ ),
+
+ shiny::tags$hr(),
+ shiny::textOutput(ns("n_obs_text"))
+ ),
+
+
+ # Step 1
+ shiny::wellPanel(
+ shiny::div(
+ class = "qid-header",
+ shiny::tags$h4("Step 1: Remove Direct Identifiers"),
+ shiny::tags$button(
+ id = ns_remove_inline,
+ class = "icon-button",
+ title = "Suppress & Remove Identifiers",
+ onclick = sprintf("$('#%s').click();", ns_remove_ids),
+ shiny::tags$i(class = "fa fa-eraser")
+ )
+ ),
+ shiny::uiOutput(ns("identifier_selector"))
+ ),
+
+ # Step 2
+ shiny::wellPanel(
+ shiny::tags$h4("Step 2: Select Quasi-Identifiers"),
+ shiny::uiOutput(ns("bucket_ui"))
+ ),
+
+ # Step 3
+ shiny::wellPanel(
+ shiny::tags$h4("Step 3: Choose Method & Parameters"),
+ shiny::selectInput(
+ ns_method,
+ "Anonymization Method:",
+ choices = c(
+ "Masking", "Suppression", "Bucketing",
+ "Pseudonymization", "Tokenization",
+ "K-Anonymity",
+ "Generalization",
+ "Anonymize Coordinates"
+ ),
+ selected = "Masking"
+ ),
+
+ shiny::uiOutput(ns("extra_input")),
+
+ shiny::conditionalPanel(
+ condition = sprintf("input['%s'] == 'Generalization'", ns_method),
+ shiny::div(
+ style = "margin-top:8px; padding:10px; background:#f5f5f5; border-radius:4px;",
+ shiny::tags$strong("Generalization tips:"),
+ shiny::tags$ul(
+ shiny::tags$li("Use drag & drop for categorical variables."),
+ shiny::tags$li(shiny::HTML("Switch to Custom numeric ranges for numeric variables. Non-numeric fields will be hidden in that mode.")),
+ shiny::tags$li("Ranges must not overlap or touch.")
+ )
+ )
+ ),
+
+ shiny::fluidRow(
+ shiny::column(4, shiny::actionButton(ns("apply"), "Apply", class = "btn btn-primary btn-block")),
+ shiny::column(4, shiny::actionButton(ns("undo"), "Undo", class = "btn btn-warning btn-block")),
+ shiny::column(4, shiny::actionButton(ns("reset"), "Reset", class = "btn btn-danger btn-block"))
+ ),
+
+ # Advisor
+ shiny::wellPanel(
+ shiny::tags$h4("Bin-Size Advisor"),
+ shiny::selectInput(
+ ns("advisor_var"),
+ "Choose numeric variable:",
+ choices = NULL,
+ selectize = TRUE
+ ),
+ shiny::actionButton(ns("advisor_run"), "Show suggestions", class = "btn btn-info btn-block"),
+ shiny::tags$hr(),
+ shiny::tags$h5("Overall Histogram"),
+ shiny::plotOutput(ns("advisor_dist"), height = "220px"),
+ shiny::tags$h5("Summary Statistics"),
+ shiny::verbatimTextOutput(ns("advisor_summary")),
+ shiny::tags$h5("Bin-width Suggestions"),
+ shiny::tableOutput(ns("advisor_table")),
+ shiny::plotOutput(ns("advisor_plot"), height = "220px")
+ ),
+
+ # Downloads / Report
+ shiny::wellPanel(
+ shiny::checkboxInput(ns("dark_mode"), "Enable Dark Mode"),
+ shiny::tags$hr(),
+ shiny::tags$h4("Downloads"),
+ shiny::downloadButton(ns("download"), "Download CSV", class = "btn-block mb-1"),
+ shiny::downloadButton(ns("download_excel"), "Download Excel", class = "btn-block mb-1"),
+ shiny::downloadButton(ns("download_dta"), "Download Stata", class = "btn-block mb-1"),
+ shiny::downloadButton(ns("download_report"), "Download Risk Report as PDF", class = "btn-block mb-1"),
+ shiny::actionButton(ns("view_report"), "View Report", class = "btn btn-info btn-block"),
+
+ shiny::conditionalPanel(
+ condition = sprintf("input['%s'] == 'Anonymize Coordinates'", ns_method),
+ shiny::div(
+ class = "download-note",
+ shiny::HTML("Note: When Anonymize Coordinates is the last applied method, the standard downloads above will export the anonymized coordinates. No separate download is needed.")
+ )
+ ),
+
+ shiny::tags$hr(),
+ shiny::uiOutput(ns("k_report")),
+ shiny::tags$hr(),
+ shiny::tags$h4("Steps Log"),
+ shiny::verbatimTextOutput(ns("step_log"), placeholder = TRUE)
+ )
+ )
+ ),
+
+ # ---------- RIGHT PANEL ----------
+ shiny::column(
+ width = 8, id = ns_right_panel,
+ shiny::div(
+ class = "right-containers",
+
+ # === Container 1: Data Preview / Map ===
+ shiny::div(
+ class = "right-box",
+ shiny::div(class = "right-header", shiny::tags$h3("Data Preview")),
+ shiny::div(
+ class = "right-body",
+
+ shiny::conditionalPanel(
+ condition = sprintf("input['%s'] == 'Anonymize Coordinates'", ns_method),
+ leaflet::leafletOutput(ns("geo_map"), height = "420px")
+ ),
+
+ shiny::conditionalPanel(
+ condition = sprintf("input['%s'] != 'Anonymize Coordinates'", ns_method),
+ shiny::div(
+ id = ns("preview-table"),
+ class = "table-wrap",
+ shiny::tableOutput(ns("preview_merged"))
+ )
+ )
+ )
+ ),
+
+ # === Container 2: Risk Assessment ===
+ shiny::div(
+ class = "right-box",
+ shiny::div(class = "right-header", shiny::tags$h3("Risk Assessment")),
+ shiny::div(
+ class = "right-body",
+ shiny::div(
+ class = "risk-summary-grid",
+ shiny::div(class = "summary-card", shiny::uiOutput(ns("risk_before"))),
+ shiny::div(class = "summary-card", shiny::uiOutput(ns("risk_after")))
+ ),
+ shiny::tags$br(),
+ shiny::div(
+ id = ns("preview-gauges"),
+ shiny::fluidRow(
+ shiny::column(
+ width = 6,
+ shiny::div(
+ class = "gauge-box",
+ shiny::tags$h4("Risk Before"),
+ flexdashboard::gaugeOutput(ns("gauge_before"), height = "200px")
+ )
+ ),
+ shiny::column(
+ width = 6,
+ shiny::div(
+ class = "gauge-box",
+ shiny::tags$h4("Risk After"),
+ flexdashboard::gaugeOutput(ns("gauge_after"), height = "200px")
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ ),
+
+ # ----------------------------- CODES TAB ---------------------------------
+ shiny::tabPanel(
+ "Codes",
+ shiny::fluidRow(
+ shiny::column(
+ width = 4,
+ shiny::tags$h4("R Code"),
+ shinyAce::aceEditor(
+ outputId = ns_r_ace,
+ mode = "r",
+ theme = "chrome",
+ readOnly = TRUE,
+ height = "400px"
+ ),
+ shiny::actionButton(
+ ns_copy_r, "📋 Copy",
+ class = "btn btn-primary btn-block",
+ onclick = sprintf("copyAce('%s','%s')", ns_r_ace, ns_copy_r)
+ )
+ ),
+ shiny::column(
+ width = 4,
+ shiny::tags$h4("Stata Code"),
+ shinyAce::aceEditor(
+ outputId = ns_stata_ace,
+ mode = "stata",
+ theme = "chrome",
+ readOnly = TRUE,
+ height = "400px"
+ ),
+ shiny::actionButton(
+ ns_copy_st, "📋 Copy",
+ class = "btn btn-primary btn-block",
+ onclick = sprintf("copyAce('%s','%s')", ns_stata_ace, ns_copy_st)
+ )
+ ),
+ shiny::column(
+ width = 4,
+ shiny::tags$h4("Python Code"),
+ shinyAce::aceEditor(
+ outputId = ns_py_ace,
+ mode = "python",
+ theme = "chrome",
+ readOnly = TRUE,
+ height = "400px"
+ ),
+ shiny::actionButton(
+ ns_copy_py, "📋 Copy",
+ class = "btn btn-primary btn-block",
+ onclick = sprintf("copyAce('%s','%s')", ns_py_ace, ns_copy_py)
+ )
+ )
+ )
+ ),
+
+ # -------------------------- DESCRIPTIONS TAB ----------------------------
+ shiny::tabPanel(
+ "Descriptions",
+ shiny::uiOutput(ns("descriptions_panel"))
+ )
+ )
+ )
+ )
+ )
+}
+
+# Allow passing rv_current from main server to anon logic
+mod_quant_anon_server <- function(id, rv_current = NULL) {
+ shiny::moduleServer(id, function(input, output, session) {
+ if (exists("anon_quant_server_logic", mode = "function", inherits = TRUE)) {
+ get("anon_quant_server_logic", mode = "function", inherits = TRUE)(
+ input, output, session,
+ rv_current = rv_current
+ )
+ } else {
+ shiny::showNotification(
+ "anon_quant_server_logic() not found. Check server/anon/server_module_quant.R",
+ type = "error"
+ )
+ }
+ })
+}
diff --git a/server.R b/server.R
index dc5355a..f17d59d 100644
--- a/server.R
+++ b/server.R
@@ -12,735 +12,749 @@ function(input, output, session){
USER = user_auth(input, output, session)
authed_started = reactiveVal(FALSE)
-
- observeEvent(USER$logged_in, {
- req(isTRUE(USER$logged_in))
- waiter_show(
- html = spin_loaders(id = 3, style="width:56px;height:56px;color:#7BC148;"),
- color = "#FFF"
- )
-
- if (authed_started()) return()
- authed_started(TRUE)
-
- app_username = USER$username
- model_training_caret_pb = Attendant$new("model_training_caret_pb", hide_on_max = TRUE)
- data_upload_id_pb = Attendant$new("data_upload_id_pb", hide_on_max = TRUE)
- model_metrics_caret_pb = Attendant$new("model_metrics_caret_pb", hide_on_max = TRUE)
- deploy_models_caret_pb = Attendant$new("deploy_models_caret_pb", hide_on_max = TRUE)
- predict_models_caret_pb = Attendant$new("predict_models_caret_pb", hide_on_max = TRUE)
- predict_endpoint_models_caret_pb = Attendant$new("predict_endpoint_models_caret_pb", hide_on_max = TRUE)
- generate_research_questions_outcome_pb = Attendant$new("generate_research_questions_outcome_pb", hide_on_max = TRUE)
- generate_research_questions_additional_analysis_pb = Attendant$new("generate_research_questions_additional_analysis_pb", hide_on_max = TRUE)
- feature_engineering_perform_preprocess_pb = Attendant$new("feature_engineering_perform_preprocess_pb", hide_on_max = TRUE)
- model_training_caret_metrics_download_all_zip_pb = Attendant$new("model_training_caret_metrics_download_all_zip_pb", hide_on_max = TRUE)
-
- #### ---- Input validators ---------------------------------------------------
- source("server/input_validators.R")
- #### ---- Create needed folders for datasets and logs ------------------------
- source("server/create_dirs.R", local=TRUE)
-
- #### ---- FastAPI base URL réactif (lié au champ fastapi_base) ----
- source("R/utils_logging.R")
-
- DEFAULT_API_BASE <- Sys.getenv("FASTAPI_BASE", "http://api:8000")
-
- source("server/automl_controls_server.R")
- source("server/train_model_server.R")
- source("R/utils_api.R")
- source("server/deploy_model_server.R", local=TRUE)
- source("ui/deploy_model_ui.R", local=TRUE)
- source("server/predict_pycaret_server.R", local = TRUE)
-
- api_base <- reactive({
- val <- input$fastapi_base
- if (is.null(val) || !nzchar(trimws(val))) {
- DEFAULT_API_BASE
- } else {
- trimws(val)
- }
- })
-
-
- #### ---- Placeholder for reactive values ------------------------------------
- ##### -------- Currently selected dataset ------------------------------------
- rv_current = reactiveValues(
- dataset_id = NULL
- , metadata_id = NULL
- , data = NULL
- , selected_vars = NULL
- , selected_var = NULL
- , working_df = NULL
- , current_filter = NULL
- , current_filter_reset = NULL
- , manage_data_title_explore = NULL
- , missing_prop = NULL
- , has_missing_data_check=FALSE
- , manage_data_title_transform = NULL
- , merge_data_title_merge = NULL
- , transform_data_select_vars = NULL
- , vartype = NULL
- , changed_variable_type_log = NULL
- , transform_data_plot_df = NULL
- , renamed_variable_log = NULL
- , transform_data_quick_plot_out = NULL
- , recoded_variable_labels_log = NULL
- , missing_prop_df = NULL
- , created_missing_values_log = NULL
- , outlier_values = NULL
- , handle_missing_values_log = NULL
- , handle_outlier_values_log = NULL
- , transform_data_plot_missing_data_out = NULL
- , quick_explore_summary = NULL
- , max_tockens = 10000
- , seed = 9991
- , outcome = NULL
- , vartype_all = NULL
- )
-
- #####------------------Plots Reactive-------------------
-
- plots_sec_rv <- reactiveValues(
- plot_rv=NULL
- ,tab_rv=NULL
- ,plot_bivariate_auto=NULL
- ,plot_corr = NULL
- )
-
-
- ##### --------- Meta data ---------------------------------------------
- rv_metadata = reactiveValues(
- upload_logs = NULL
- , dataset_ids = NULL
- , data_summary_str = NULL
- , data_summary_skim = NULL
- , data_summary_summary = NULL
- , data_summary_summarytools = NULL
- )
-
- rv_database <- reactiveValues(schema_list = NULL
- , table_list = NULL
- , conn = NULL
- , schema_selected = NULL
- , table_selected = NULL
- , df_table = data.frame()
- , df_table_str = NULL
- , query_table_name = NULL
- , database_host = NULL
- , database_name = NULL
- , database_user = NULL
- , database_pass = NULL
- , details = NULL
- )
-
- ## ---
-
- rv_omop<- reactiveValues(
- url = NULL )
-
-
- ## LLM/GAI
- rv_generative_ai = reactiveValues(
- history = NULL
- )
-
- ## Reactive values for ML/AI module
- rv_ml_ai = reactiveValues(
- session_id = NULL
- , seed_value = NULL
- , dataset_id = NULL
- , analysis_type = NULL
- , task = NULL
- , outcome = NULL
- , model_formula = NULL
- , partition_ratio = NULL
- , predictors = NULL
- , excluded_predictors = NULL
- , ml_ai_setup_result = NULL
- , history = NULL
- , split = NULL
- , train_df = NULL
- , test_df = NULL
- , preprocessed = NULL
- , feature_engineering_preprocessed_log = NULL
- , at_least_one_model = FALSE
- )
-
- ## RV to hold UIs
- rv_ui_models = reactiveValues(
- model_training_caret_models_ols_check = NULL
- , model_training_caret_models_ols_advance_control = NULL
- )
-
- ## Train control caret
- rv_train_control_caret = reactiveValues(
- method = "cv"
- , number = 5
- , repeats = NA
- , search = "grid"
- , verboseIter = FALSE
- , savePredictions = FALSE
- , classProbs = TRUE
- )
-
- ## Trained models
- rv_training_models = reactiveValues(
- ols_model = NULL
- , ols_param = FALSE
- , ols_name = NULL
- , ols_trained_model = NULL
- , rf_model = NULL
- , rf_param = FALSE
- , rf_name = NULL
- , rf_trained_model = NULL
- , all_trained_models = NULL
- )
-
- rv_training_results = reactiveValues(
- models = NULL
- , train_metrics_df = NULL
- , test_metrics_objs = NULL
- , post_model_metrics_objs = NULL
- , control_parameters = NULL
- , tuned_parameters = NULL
- )
-
- # Update training results when a new model is trained
- automl_controls_server(
- id = "automl_controls",
- rv_current = rv_current,
- rv_ml_ai = rv_ml_ai,
- api_base = api_base
- )
-
- train_model_server(
- id = "train_model",
- rv_current = rv_current,
- rv_ml_ai = rv_ml_ai,
- api_base = api_base
- )
-
- # End update training
- # ---- (A) Detect if a complete PyCaret run is available (leaderboard displayed) ----
- .can_show_by_train <- reactive({
- curr_ds <- rv_current$dataset_id %||% rv_ml_ai$dataset_id
- isTRUE(rv_ml_ai$status %in% c("Finished","Finished_NoPlots")) &&
- !is.null(rv_ml_ai$leaderboard) && NROW(rv_ml_ai$leaderboard) > 0 &&
- isTRUE(nzchar(rv_ml_ai$trained_dataset_id)) &&
- identical(rv_ml_ai$trained_dataset_id, curr_ds)
- })
-
- observeEvent(rv_current$dataset_id, {
- # If we change the dataset, we clean up the transient state linked to the previous train.
- if (!identical(rv_current$dataset_id, rv_ml_ai$trained_dataset_id)) {
- rv_ml_ai$leaderboard <- NULL
- rv_ml_ai$leaderboard_full <- NULL
- rv_ml_ai$test_leaderboard <- NULL
- rv_ml_ai$test_leaderboard_full <- NULL
- rv_ml_ai$models <- NULL
- rv_ml_ai$eval_metrics <- NULL
- rv_ml_ai$eval_plots <- NULL
- rv_ml_ai$status <- NULL
- }
- }, ignoreInit = FALSE)
-
-
-
- # ---- (B) Datasets with pre-trained models (historical) ----
- .get_models_index_csv <- function() file.path(getwd(), app_username, "logs", "models", "index.csv")
- dataset_has_history <- reactive({
- idx <- .get_models_index_csv()
- if (!file.exists(idx)) return(FALSE)
- df <- tryCatch(read.csv(idx, stringsAsFactors = FALSE), error = function(e) NULL)
- if (is.null(df) || !"dataset_id" %in% names(df)) return(FALSE)
- ds <- rv_ml_ai$dataset_id %||% rv_current$dataset_id
- if (is.null(ds) || !nzchar(ds)) return(FALSE)
- any(df$dataset_id == ds & (df$framework %in% c("PyCaret","pycaret","Pycaret")))
- })
-
- # Expose known datasets for the Deploy module (used in its selector)
- observe({
- idx <- .get_models_index_csv()
- if (!file.exists(idx)) return(invisible(NULL))
- df <- tryCatch(read.csv(idx, stringsAsFactors = FALSE), error = function(e) NULL)
- if (is.null(df) || !"dataset_id" %in% names(df)) return(invisible(NULL))
- rv_current$known_datasets <- sort(unique(df$dataset_id))
- })
- .can_show_deploy <- reactive({
- isTRUE(.can_show_by_train()) || isTRUE(dataset_has_history())
- })
- # (keep this if you still use it on the JS side)
- output$can_show_deploy <- reactive({ .can_show_deploy() })
- outputOptions(output, "can_show_deploy", suspendWhenHidden = FALSE)
-
- # ---- Deploy tab UI container ----
- output$deploy_container <- renderUI({
- if (!isTRUE(.can_show_deploy())) return(NULL) # => onglet totalement vide
- column(width = 12, deployment_ui("deploy"))
- })
-
- outputOptions(output, "can_show_deploy", suspendWhenHidden = FALSE)
- ## Deployed models table
- rv_deploy_models = reactiveValues(
- trained_models_table = NULL
- )
-
- ## Deployed models
- rv_deployed_models = reactiveValues()
-
- ## Reactive values to stock AutoML leaderboard
- rv_automl <- reactiveValues(
- leaderboard = NULL
- )
-
- #### ---- App title ----------------------------------------------------
- source("server/header_footer_configs.R", local=TRUE)
- app_title()
-
- ###-------App Footer--------------------------
-
- footer_language_translation()
- ###-------Menu Translate---------
-
- menu_translation()
-
- #### ---- Change language ----------------------------------------------------
- output$change_language = change_language
-
- source("server/change_language_update.R", local = TRUE)
- change_language_update()
-
- #### ---- Upload data UI --------------------------------------------
- source("ui/upload_data.R", local = TRUE)
- output$upload_type = upload_type
-
- #### ---- Upload dataset/files UI --------------------------------------------
- source("server/input_files.R", local = TRUE)
- output$input_files = input_files
-
- #### ---- Show uploaded datasets UI --------------------------------------------
- output$show_uploaded = show_uploaded
-
- #### ---- Data upload form -----------------------------------------------
- source("ui/upload_form.R", local = TRUE)
- output$study_name = study_name
- output$study_country = study_country
- output$additional_info = additional_info
- output$submit_upload = submit_upload
-
- #### ---- Databse and API connection warning ---------------------
- db_api_con_future
-
- #### ---- Upload datasets ----------------------------------------
- source("server/upload_data.R", local = TRUE)
- upload_data_server()
-
- #### ---- Database integration ----------------------------------------
- source("server/database_integration.R", local = TRUE)
- database_integration_server()
-
- #### --- Database related form elements ---###
- output$db_type = db_type
- output$db_host = db_host
- output$db_name = db_name
- output$db_user = db_user
- output$db_pwd = db_pwd
- output$db_connect = db_connect
- output$db_schema_list = db_schema_list
- output$db_table_list = db_table_list
- output$db_custom_query = db_custom_query
- output$db_run_query = db_run_query
- output$db_port = db_port
- output$db_disconnect = db_disconnect
- output$db_tab_query = db_tab_query
- output$existing_connection = existing_connection
-
- source("server/omop_analysis.R", local = TRUE)
- omop_analysis_server()
-
- stderr_file_path <- file.path(getwd(), app_username, "output", "dq_stderr.txt")
-
- stderr_content<-create_log_reader(stderr_file_path)
-
-
- #### ---- Collect logs ----------------------------------------
- source("server/collect_logs.R", local = TRUE)
- collect_logs_server()
-
- #### ---- Display uploaded datasets ----------------------------------------
- source("server/display_uploaded_data.R", local = TRUE)
- display_uploaded_data_server()
-
- #### ---- Delete uploaded dadatsets ----------------------------------------
- source("server/delete_uploaded_data.R", local = TRUE)
- delete_uploaded_data_server()
-
- #### ---- Update logfiles based on existing datasets -------------------####
- source("server/update_logs.R", local = TRUE)
- update_logs_server()
-
- #### ---- Manage data ----------------------------------------------
-
- ##### ---- Select data ---------------------------------------------
- source("server/select_data.R", local = TRUE)
- select_data_server()
- manage_data_show_server()
-
- ##### ---- Display meta data for the selected dataset ---------------------------------------------
- source("server/display_metadata.R", local = TRUE)
- display_selected_metadata_server()
- reset_display_selected_metadata_server()
-
- ##### ---- Currently selected data ---------------------------------------------
- source("server/selected_data.R", local = TRUE)
- currently_selected_data_server()
-
- ##### ----Generate summary stats for the row data -------------------
- source("server/manage_data_summary.R", local = TRUE)
- generate_data_summary_server()
- display_data_summary_server()
-
- #### ----- Explore data -----------------------------------------------
- source("server/explore_data.R", local = TRUE)
- explore_data_server()
- explore_data_subactions_server()
-
- ##----User Defined Visualization section-----------------------
- source("ui/user_defined_visualization_header.R", local = TRUE)
- output$user_output_type = user_output_type
- output$user_tab_options = user_tab_options
- output$user_calc_var = user_calc_var
- #output$user_strata_var = user_strata_var
- output$user_row_var = user_row_var
- output$usr_create_cross_tab = usr_create_cross_tab
- output$user_download_table = user_download_table
-
- output$user_table_options = user_table_options
- output$user_report_numeric = user_report_numeric
- output$user_add_p_value = user_add_p_value
- output$user_add_confidence_interval = user_add_confidence_interval
- output$user_drop_missing_values = user_drop_missing_values
- output$user_table_caption = user_table_caption
-
- output$user_plot_options = user_plot_options
- output$user_select_variable_on_x_axis = user_select_variable_on_x_axis
- output$user_select_variable_on_y_axis = user_select_variable_on_y_axis
- output$user_plot_title = user_plot_title
- output$user_x_axis_label = user_x_axis_label
- output$user_y_axis_label = user_y_axis_label
- output$user_create = user_create
- output$user_download = user_download
-
- output$user_more_plot_options = user_more_plot_options
- output$user_transform_to_doughnut = user_transform_to_doughnut
- output$user_select_color_variable = user_select_color_variable
- output$user_select_group_variable = user_select_group_variable
- output$user_visual_orientation = user_visual_orientation
- output$user_bar_width = user_bar_width
- output$user_line_size = user_line_size
- output$user_select_line_type = user_select_line_type
- output$user_add_shapes = user_add_shapes
-
- output$user_select_shape = user_select_shape
- output$user_add_smooth = user_add_smooth
- output$user_display_confidence_interval = user_display_confidence_interval
- output$user_level_of_confidence_interval = user_level_of_confidence_interval
- output$user_select_line_join = user_select_line_join
- output$user_add_line_type = user_add_line_type
- output$user_add_points = user_add_points
- output$user_y_variable_summary_type = user_y_variable_summary_type
- output$user_title_position = user_title_position
-
- output$user_size_of_plot_title = user_size_of_plot_title
- output$user_axis_title_size = user_axis_title_size
- output$user_facet_title_size = user_facet_title_size
- output$user_axis_text_size = user_axis_text_size
- output$user_data_label_size = user_data_label_size
- output$user_x_axis_text_angle = user_x_axis_text_angle
- output$user_legend_title = user_legend_title
- output$user_stacked = user_stacked
- output$user_add_density = user_add_density
- output$user_remove_histogram = user_remove_histogram
- output$user_select_color_variable_single = user_select_color_variable_single
- output$user_select_color_parlet = user_select_color_parlet
- output$user_numeric_summary = user_numeric_summary
- output$user_tab_more_out = user_tab_more_out
- output$user_graph_more_out = user_tab_more_out
-
- output$bivariate_header_label = bivariate_header_label
- output$corrplot_header_label = corrplot_header_label
-
- output$user_select_bivariate_single_color = user_select_bivariate_single_color
- output$user_select_color_parlet_bivariate = user_select_color_parlet_bivariate
- output$user_select_color_parlet_corrplot = user_select_color_parlet_corrplot
- output$bivariate_plot_title = bivariate_plot_title
- output$corrplot_title = corrplot_title
- output$user_download_autoreport = user_download_autoreport
- output$user_generatebivriate = user_generatebivriate
-
-
-
- ##### ---- Explore data actions ----------------------------------
- explore_data_actions_server()
-
- ##### ---- Filter data --------------------------------------------
- explore_data_filter_server()
- explore_data_apply_filter_server()
- explore_data_current_filter_server()
-
- ##### ---- Show/display -------------------------------------------------------
- explore_show_data_server()
- explore_data_reset_current_filter_server()
-
- ##### ---- Compute proportion of missing data ---------------------------
- explore_missing_data_server()
-
- ##### ---- Select variables ---------------------------------------------
- explore_data_select_variables_server()
- explore_data_selected_variables_server()
-
- ##### ---- Update data -----------------------------------------------
- explore_data_update_data_server()
-
- #### ---- Transform variables -------------------------------------- ####
- source("server/transform_data.R", local = TRUE)
-
- ##### ---- Select variables to transform ------------------------------------###
- transform_data_select_variables_server()
-
- ##### ---- Change type -----------------------------------------------###
- transform_data_change_type_server()
-
- ##### ---- Rename variables -----------------------------------------------###
- transform_data_rename_variables_server()
-
- ##### ---- Recode/change value labels ---------------------------------------###
- transform_data_quick_explore_recode_server()
-
- ##### ---- Handle missing data ---------------------------------------###
- transform_data_create_missing_values_server()
-
- ##### ---- Identify outliers ---------------------------------------###
- transform_data_identify_outliers_server()
-
- ##### ---- Handle missing values ---------------------------------------###
- transform_data_handle_missing_values_server()
-
- ##### ---- Plot transform data ----------------------------------------------###
- transform_data_quick_explore_plot_server()
-
- ##### ---- Plot missing data ----------------------------------------------###
- transform_data_plot_missing_data_server()
-
- #### ---- Combine datasets with the existing one --------------------------------------####
- source("server/combine_data.R", local = TRUE)
-
- ##### ---- List of internal data ------------------------------------------####
- combine_data_list_datasets()
-
- ##### ---- Combine data options ------------------------------------------####
- combine_data_type()
-
- ##### ---- Combine data mtch type ------------------------------------------####
- combine_data_match_type()
-
- ##### ---- Combine data variables matched --------------------####
- combine_data_variable_matching()
-
- #### ----- Perform matching ---------------------------------####
- combine_data_perform_variable_match()
-
- ##### ---- Combine data perform merging --------------------####
- combine_data_perform_merging()
-
- #### ---- Reset combine data --------------------------------####
- combine_data_reset()
-
- ##### ---- Control Custom visualizations ------------------ #####
- source("server/user_defined_visualization.R", local = TRUE)
- user_defined_server()
-
- ### ------- OMOP ------------------------------------------ #####
-
- #### ----- Cohort Constructor ---------#####
- source("server/run_cohort_pipeline.R", local = TRUE)
- run_cohort_pipeline()
-
- #### ----- Feature Extraction ---------#####
- source("server/feature_extraction_pipeline.R", local = TRUE)
- feature_extraction_pipeline()
-
- #### ---- Achilles Integration -------------------####
-
- source("server/run_achilles.R", local = TRUE)
- achilles_integration_server()
-
- ### ---- OMOP CDM Summaries---------------------------####
- source("server/omop_summaries.R", local = TRUE)
- omopVizServer()
-
- #### ---- Generate Research Questions --------------------------------------####
- source("server/research_questions.R", local = TRUE)
- generate_research_questions_choices()
-
-
- ##### ---- API Token ------------------ ####
- generate_research_questions_api_token()
- generate_research_questions_api_store()
-
- #### ---- Addional prompts --------------- ####
- generate_research_questions_additional()
-
- #### ---- Generate insights using Gemini --------------- ####
- generate_research_questions_gemini()
-
- #### ---- Machine learning and AI --------------- ####
-
- ##### ----- Set ML/AI UI ------------------- ####
- source("server/setup_models.R", local=TRUE)
- setup_models_ui()
-
- ##### ----- Preprocessing ------------------- ####
- source("server/feature_engineering.R", local=TRUE)
-
- #### Preprocessing ------------------------------------------- ####
- feature_engineering_perform_preprocess_server()
-
- #### ------ Missing value imputation -------------------------- ####
- feature_engineering_recipe_server()
- feature_engineering_impute_missing_server()
-
- #### ----- Modelling framework --------------------------------- ####
-
- source("server/modelling_framework.R", local=TRUE)
- modelling_framework_choices()
-
- #### ----- Model setup ----------------------------------------- ####
- source("server/model_training_setup.R", local=TRUE)
- model_training_setup_server()
-
- #### ----- Caret models --------------------------------------- ####
- source("server/model_training_caret_models.R", local=TRUE)
-
- ## LM/GLM
- model_training_caret_models_ols_server()
-
- ## RF
- model_training_caret_models_rf_server()
-
- ## GBM
- model_training_caret_models_gbm_server()
-
- ## xgbTree
- model_training_caret_models_xgbTree_server()
-
- ## xgbLinear
- model_training_caret_models_xgbLinear_server()
-
- ## svmRadial
- model_training_caret_models_svmRadial_server()
-
- ## svmLinear
- model_training_caret_models_svmLinear_server()
-
- ## svmPoly
- model_training_caret_models_svmPoly_server()
-
- ## glmnet
- model_training_caret_models_glmnet_server()
-
- ## LASSO
- model_training_caret_models_lasso_server()
-
- ## Ridge
- model_training_caret_models_ridge_server()
-
- ## KNN
- model_training_caret_models_knn_server()
-
- ## NNET
- model_training_caret_models_nnet_server()
-
- ## TREEBAG
- model_training_caret_models_treebag_server()
-
- ## avNNet
- model_training_caret_models_avNNet_server()
-
- ## PLS
- model_training_caret_models_pls_server()
-
- ## GAM
- model_training_caret_models_gam_server()
-
- #### ----- Train all models ----------------------------------- ####
- source("server/train_caret_models.R", local=TRUE)
- model_training_caret_train_all_server()
-
- #### ----- Compare trained models ------------------------------ ####
- source("server/compare_trained_caret_models.R", local=TRUE)
- model_training_caret_train_metrics_server()
-
- #### ----- Deploy trained models ------------------------------- ####
- source("server/deploy_trained_caret_models.R", local=TRUE)
- deploy_trained_caret_models()
-
- #### ---- Predict using no-code models ------------------------ ####
- source("server/predict_trained_caret_models.R", local=TRUE)
- predict_trained_caret_models()
-
- #### ---- PyCaret Integration (API) ----------------------------------------------------
-
- # New ADD
- rv_ml_ai <- rv_ml_ai %||% reactiveValues(target = NULL, outcome = NULL)
- rv_current <- rv_current %||% reactiveValues(target = NULL)
-
- deployment_server(id="deploy",rv_ml_ai=rv_ml_ai,rv_current = rv_current,api_base=api_base)
- predict_pycaret_server("predict_pycaret", api_base , rv_current, rv_ml_ai)
-
- # END NEW ADD
- #### ---- Call current dataset for FastAPI ---------------------------------------------------
- source("server/automl_server.R", local=TRUE)
- automl_server("automl_module", rv_current, rv_ml_ai)
-
- observe({
- req(!is.null(rv_ml_ai$modelling_framework)) # Check if value exist
-
- if (tolower(rv_ml_ai$modelling_framework) == "pycaret") {
- output$automl_module_ui <- renderUI({
- automl_ui("automl_module")
- })
- } else {
- output$automl_module_ui <- renderUI({
- h4("")
- })
- }
- })
-
- observeEvent(input$modelling_framework_choices, {
- rv_ml_ai$framework <- tolower(input$modelling_framework_choices %||% "")
- }, ignoreInit = FALSE)
-
- #### ---- Deep Learning Server ----- ###
- source("server/deep_learning.R", local=TRUE)
- deep_learning()
-
- #### ---- Reset various components --------------------------------------####
- ## Various components come before this
- source("server/resets.R", local = TRUE)
-
- ##### ---- Reset on delete or language change ------------------- ####
- reset_data_server()
-
- #### ---- Activate required fields --------------------------------------####
- iv$enable()
- iv_url$enable()
- iv_ml$enable()
-
- waiter_hide()
+ observeEvent(USER$logged_in, {
+ req(isTRUE(USER$logged_in))
+ waiter_show(
+ html = spin_loaders(id = 3, style="width:56px;height:56px;color:#7BC148;"),
+ color = "#FFF"
+ )
+
+ if (authed_started()) return()
+ authed_started(TRUE)
+
+ app_username = USER$username
+
+ # ---- Anonymization sources ----
+ source("modules/anonymization_functions.R", local = FALSE)
+ source("server/anon/server_module_quant.R", local = FALSE)
+ source("modules/mod_quant_anonymization.R", local = FALSE)
+ # ---- ----
+
+ model_training_caret_pb = Attendant$new("model_training_caret_pb", hide_on_max = TRUE)
+ data_upload_id_pb = Attendant$new("data_upload_id_pb", hide_on_max = TRUE)
+ model_metrics_caret_pb = Attendant$new("model_metrics_caret_pb", hide_on_max = TRUE)
+ deploy_models_caret_pb = Attendant$new("deploy_models_caret_pb", hide_on_max = TRUE)
+ predict_models_caret_pb = Attendant$new("predict_models_caret_pb", hide_on_max = TRUE)
+ predict_endpoint_models_caret_pb = Attendant$new("predict_endpoint_models_caret_pb", hide_on_max = TRUE)
+ generate_research_questions_outcome_pb = Attendant$new("generate_research_questions_outcome_pb", hide_on_max = TRUE)
+ generate_research_questions_additional_analysis_pb = Attendant$new("generate_research_questions_additional_analysis_pb", hide_on_max = TRUE)
+ feature_engineering_perform_preprocess_pb = Attendant$new("feature_engineering_perform_preprocess_pb", hide_on_max = TRUE)
+ model_training_caret_metrics_download_all_zip_pb = Attendant$new("model_training_caret_metrics_download_all_zip_pb", hide_on_max = TRUE)
+
+ #### ---- Input validators ---------------------------------------------------
+ source("server/input_validators.R")
+ #### ---- Create needed folders for datasets and logs ------------------------
+ source("server/create_dirs.R", local=TRUE)
+
+ #### ---- Reactive FastAPI base URL (bound to the fastapi_base input field) ----
+ source("R/utils_logging.R")
+
+ DEFAULT_API_BASE <- Sys.getenv("FASTAPI_BASE", "http://api:8000")
+
+ source("server/automl_controls_server.R")
+ source("server/train_model_server.R")
+ source("R/utils_api.R")
+ source("server/deploy_model_server.R", local=TRUE)
+ source("ui/deploy_model_ui.R", local=TRUE)
+ source("server/predict_pycaret_server.R", local = TRUE)
+
+ api_base <- reactive({
+ val <- input$fastapi_base
+ if (is.null(val) || !nzchar(trimws(val))) {
+ DEFAULT_API_BASE
+ } else {
+ trimws(val)
+ }
+ })
+
+
+ #### ---- Placeholder for reactive values ------------------------------------
+ ##### -------- Currently selected dataset ------------------------------------
+ rv_current = reactiveValues(
+ dataset_id = NULL
+ , metadata_id = NULL
+ , data = NULL
+ , selected_vars = NULL
+ , selected_var = NULL
+ , working_df = NULL
+ , current_filter = NULL
+ , current_filter_reset = NULL
+ , manage_data_title_explore = NULL
+ , missing_prop = NULL
+ , has_missing_data_check=FALSE
+ , manage_data_title_transform = NULL
+ , merge_data_title_merge = NULL
+ , transform_data_select_vars = NULL
+ , vartype = NULL
+ , changed_variable_type_log = NULL
+ , transform_data_plot_df = NULL
+ , renamed_variable_log = NULL
+ , transform_data_quick_plot_out = NULL
+ , recoded_variable_labels_log = NULL
+ , missing_prop_df = NULL
+ , created_missing_values_log = NULL
+ , outlier_values = NULL
+ , handle_missing_values_log = NULL
+ , handle_outlier_values_log = NULL
+ , transform_data_plot_missing_data_out = NULL
+ , quick_explore_summary = NULL
+ , max_tockens = 10000
+ , seed = 9991
+ , outcome = NULL
+ , vartype_all = NULL
+ )
+
+ # ---- Anonymization module server ----
+ if (exists("mod_quant_anon_server", mode = "function", inherits = TRUE)) {
+ mod_quant_anon_server("quant_anon", rv_current = rv_current)
+ } else {
+ showNotification("mod_quant_anon_server() not found. Check modules/mod_quant_anonymization.R", type = "error")
+ }
+ # ---- ----
+
+ #####------------------Plots Reactive-------------------
+
+ plots_sec_rv <- reactiveValues(
+ plot_rv=NULL
+ ,tab_rv=NULL
+ ,plot_bivariate_auto=NULL
+ ,plot_corr = NULL
+ )
+
+
+ ##### --------- Meta data ---------------------------------------------
+ rv_metadata = reactiveValues(
+ upload_logs = NULL
+ , dataset_ids = NULL
+ , data_summary_str = NULL
+ , data_summary_skim = NULL
+ , data_summary_summary = NULL
+ , data_summary_summarytools = NULL
+ )
+
+ rv_database <- reactiveValues(schema_list = NULL
+ , table_list = NULL
+ , conn = NULL
+ , schema_selected = NULL
+ , table_selected = NULL
+ , df_table = data.frame()
+ , df_table_str = NULL
+ , query_table_name = NULL
+ , database_host = NULL
+ , database_name = NULL
+ , database_user = NULL
+ , database_pass = NULL
+ , details = NULL
+ )
+
+ ## ---
+
+ rv_omop<- reactiveValues(
+ url = NULL )
+
+
+ ## LLM/GAI
+ rv_generative_ai = reactiveValues(
+ history = NULL
+ )
+
+ ## Reactive values for ML/AI module
+ rv_ml_ai = reactiveValues(
+ session_id = NULL
+ , seed_value = NULL
+ , dataset_id = NULL
+ , analysis_type = NULL
+ , task = NULL
+ , outcome = NULL
+ , model_formula = NULL
+ , partition_ratio = NULL
+ , predictors = NULL
+ , excluded_predictors = NULL
+ , ml_ai_setup_result = NULL
+ , history = NULL
+ , split = NULL
+ , train_df = NULL
+ , test_df = NULL
+ , preprocessed = NULL
+ , feature_engineering_preprocessed_log = NULL
+ , at_least_one_model = FALSE
+ )
+
+ ## RV to hold UIs
+ rv_ui_models = reactiveValues(
+ model_training_caret_models_ols_check = NULL
+ , model_training_caret_models_ols_advance_control = NULL
+ )
+
+ ## Train control caret
+ rv_train_control_caret = reactiveValues(
+ method = "cv"
+ , number = 5
+ , repeats = NA
+ , search = "grid"
+ , verboseIter = FALSE
+ , savePredictions = FALSE
+ , classProbs = TRUE
+ )
+
+ ## Trained models
+ rv_training_models = reactiveValues(
+ ols_model = NULL
+ , ols_param = FALSE
+ , ols_name = NULL
+ , ols_trained_model = NULL
+ , rf_model = NULL
+ , rf_param = FALSE
+ , rf_name = NULL
+ , rf_trained_model = NULL
+ , all_trained_models = NULL
+ )
+
+ rv_training_results = reactiveValues(
+ models = NULL
+ , train_metrics_df = NULL
+ , test_metrics_objs = NULL
+ , post_model_metrics_objs = NULL
+ , control_parameters = NULL
+ , tuned_parameters = NULL
+ )
+
+ # Update training results when a new model is trained
+ automl_controls_server(
+ id = "automl_controls",
+ rv_current = rv_current,
+ rv_ml_ai = rv_ml_ai,
+ api_base = api_base
+ )
+
+ train_model_server(
+ id = "train_model",
+ rv_current = rv_current,
+ rv_ml_ai = rv_ml_ai,
+ api_base = api_base
+ )
+
+ # End update training
+ # ---- (A) Detect if a complete PyCaret run is available (leaderboard displayed) ----
+ .can_show_by_train <- reactive({
+ curr_ds <- rv_current$dataset_id %||% rv_ml_ai$dataset_id
+ isTRUE(rv_ml_ai$status %in% c("Finished","Finished_NoPlots")) &&
+ !is.null(rv_ml_ai$leaderboard) && NROW(rv_ml_ai$leaderboard) > 0 &&
+ isTRUE(nzchar(rv_ml_ai$trained_dataset_id)) &&
+ identical(rv_ml_ai$trained_dataset_id, curr_ds)
+ })
+
+ observeEvent(rv_current$dataset_id, {
+ # If we change the dataset, we clean up the transient state linked to the previous train.
+ if (!identical(rv_current$dataset_id, rv_ml_ai$trained_dataset_id)) {
+ rv_ml_ai$leaderboard <- NULL
+ rv_ml_ai$leaderboard_full <- NULL
+ rv_ml_ai$test_leaderboard <- NULL
+ rv_ml_ai$test_leaderboard_full <- NULL
+ rv_ml_ai$models <- NULL
+ rv_ml_ai$eval_metrics <- NULL
+ rv_ml_ai$eval_plots <- NULL
+ rv_ml_ai$status <- NULL
+ }
+ }, ignoreInit = FALSE)
+
+
+
+ # ---- (B) Datasets with pre-trained models (historical) ----
+ .get_models_index_csv <- function() file.path(getwd(), app_username, "logs", "models", "index.csv")
+ dataset_has_history <- reactive({
+ idx <- .get_models_index_csv()
+ if (!file.exists(idx)) return(FALSE)
+ df <- tryCatch(read.csv(idx, stringsAsFactors = FALSE), error = function(e) NULL)
+ if (is.null(df) || !"dataset_id" %in% names(df)) return(FALSE)
+ ds <- rv_ml_ai$dataset_id %||% rv_current$dataset_id
+ if (is.null(ds) || !nzchar(ds)) return(FALSE)
+ any(df$dataset_id == ds & (df$framework %in% c("PyCaret","pycaret","Pycaret")))
+ })
+
+ # Expose known datasets for the Deploy module (used in its selector)
+ observe({
+ idx <- .get_models_index_csv()
+ if (!file.exists(idx)) return(invisible(NULL))
+ df <- tryCatch(read.csv(idx, stringsAsFactors = FALSE), error = function(e) NULL)
+ if (is.null(df) || !"dataset_id" %in% names(df)) return(invisible(NULL))
+ rv_current$known_datasets <- sort(unique(df$dataset_id))
+ })
+ .can_show_deploy <- reactive({
+ isTRUE(.can_show_by_train()) || isTRUE(dataset_has_history())
+ })
+ # (keep this if you still use it on the JS side)
+ output$can_show_deploy <- reactive({ .can_show_deploy() })
+ outputOptions(output, "can_show_deploy", suspendWhenHidden = FALSE)
+
+ # ---- Deploy tab UI container ----
+ output$deploy_container <- renderUI({
+ if (!isTRUE(.can_show_deploy())) return(NULL) # => tab rendered completely empty
+ column(width = 12, deployment_ui("deploy"))
+ })
+
+ outputOptions(output, "can_show_deploy", suspendWhenHidden = FALSE)
+ ## Deployed models table
+ rv_deploy_models = reactiveValues(
+ trained_models_table = NULL
+ )
+
+ ## Deployed models
+ rv_deployed_models = reactiveValues()
+
+ ## Reactive values to stock AutoML leaderboard
+ rv_automl <- reactiveValues(
+ leaderboard = NULL
+ )
+
+ #### ---- App title ----------------------------------------------------
+ source("server/header_footer_configs.R", local=TRUE)
+ app_title()
+
+ ###-------App Footer--------------------------
+
+ footer_language_translation()
+ ###-------Menu Translate---------
+
+ menu_translation()
+
+ #### ---- Change language ----------------------------------------------------
+ output$change_language = change_language
+
+ source("server/change_language_update.R", local = TRUE)
+ change_language_update()
+
+ #### ---- Upload data UI --------------------------------------------
+ source("ui/upload_data.R", local = TRUE)
+ output$upload_type = upload_type
+
+ #### ---- Upload dataset/files UI --------------------------------------------
+ source("server/input_files.R", local = TRUE)
+ output$input_files = input_files
+
+ #### ---- Show uploaded datasets UI --------------------------------------------
+ output$show_uploaded = show_uploaded
+
+ #### ---- Data upload form -----------------------------------------------
+ source("ui/upload_form.R", local = TRUE)
+ output$study_name = study_name
+ output$study_country = study_country
+ output$additional_info = additional_info
+ output$submit_upload = submit_upload
+
+ #### ---- Database and API connection warning ---------------------
+ db_api_con_future
+
+ #### ---- Upload datasets ----------------------------------------
+ source("server/upload_data.R", local = TRUE)
+ upload_data_server()
+
+ #### ---- Database integration ----------------------------------------
+ source("server/database_integration.R", local = TRUE)
+ database_integration_server()
+
+ #### --- Database related form elements ---###
+ output$db_type = db_type
+ output$db_host = db_host
+ output$db_name = db_name
+ output$db_user = db_user
+ output$db_pwd = db_pwd
+ output$db_connect = db_connect
+ output$db_schema_list = db_schema_list
+ output$db_table_list = db_table_list
+ output$db_custom_query = db_custom_query
+ output$db_run_query = db_run_query
+ output$db_port = db_port
+ output$db_disconnect = db_disconnect
+ output$db_tab_query = db_tab_query
+ output$existing_connection = existing_connection
+
+ source("server/omop_analysis.R", local = TRUE)
+ omop_analysis_server()
+
+ stderr_file_path <- file.path(getwd(), app_username, "output", "dq_stderr.txt")
+
+ stderr_content<-create_log_reader(stderr_file_path)
+
+
+ #### ---- Collect logs ----------------------------------------
+ source("server/collect_logs.R", local = TRUE)
+ collect_logs_server()
+
+ #### ---- Display uploaded datasets ----------------------------------------
+ source("server/display_uploaded_data.R", local = TRUE)
+ display_uploaded_data_server()
+
+ #### ---- Delete uploaded datasets ----------------------------------------
+ source("server/delete_uploaded_data.R", local = TRUE)
+ delete_uploaded_data_server()
+
+ #### ---- Update logfiles based on existing datasets -------------------####
+ source("server/update_logs.R", local = TRUE)
+ update_logs_server()
+
+ #### ---- Manage data ----------------------------------------------
+
+ ##### ---- Select data ---------------------------------------------
+ source("server/select_data.R", local = TRUE)
+ select_data_server()
+ manage_data_show_server()
+
+ ##### ---- Display meta data for the selected dataset ---------------------------------------------
+ source("server/display_metadata.R", local = TRUE)
+ display_selected_metadata_server()
+ reset_display_selected_metadata_server()
+
+ ##### ---- Currently selected data ---------------------------------------------
+ source("server/selected_data.R", local = TRUE)
+ currently_selected_data_server()
+
+ ##### ---- Generate summary stats for the raw data -------------------
+ source("server/manage_data_summary.R", local = TRUE)
+ generate_data_summary_server()
+ display_data_summary_server()
+
+ #### ----- Explore data -----------------------------------------------
+ source("server/explore_data.R", local = TRUE)
+ explore_data_server()
+ explore_data_subactions_server()
+
+ ##----User Defined Visualization section-----------------------
+ source("ui/user_defined_visualization_header.R", local = TRUE)
+ output$user_output_type = user_output_type
+ output$user_tab_options = user_tab_options
+ output$user_calc_var = user_calc_var
+ #output$user_strata_var = user_strata_var
+ output$user_row_var = user_row_var
+ output$usr_create_cross_tab = usr_create_cross_tab
+ output$user_download_table = user_download_table
+
+ output$user_table_options = user_table_options
+ output$user_report_numeric = user_report_numeric
+ output$user_add_p_value = user_add_p_value
+ output$user_add_confidence_interval = user_add_confidence_interval
+ output$user_drop_missing_values = user_drop_missing_values
+ output$user_table_caption = user_table_caption
+
+ output$user_plot_options = user_plot_options
+ output$user_select_variable_on_x_axis = user_select_variable_on_x_axis
+ output$user_select_variable_on_y_axis = user_select_variable_on_y_axis
+ output$user_plot_title = user_plot_title
+ output$user_x_axis_label = user_x_axis_label
+ output$user_y_axis_label = user_y_axis_label
+ output$user_create = user_create
+ output$user_download = user_download
+
+ output$user_more_plot_options = user_more_plot_options
+ output$user_transform_to_doughnut = user_transform_to_doughnut
+ output$user_select_color_variable = user_select_color_variable
+ output$user_select_group_variable = user_select_group_variable
+ output$user_visual_orientation = user_visual_orientation
+ output$user_bar_width = user_bar_width
+ output$user_line_size = user_line_size
+ output$user_select_line_type = user_select_line_type
+ output$user_add_shapes = user_add_shapes
+
+ output$user_select_shape = user_select_shape
+ output$user_add_smooth = user_add_smooth
+ output$user_display_confidence_interval = user_display_confidence_interval
+ output$user_level_of_confidence_interval = user_level_of_confidence_interval
+ output$user_select_line_join = user_select_line_join
+ output$user_add_line_type = user_add_line_type
+ output$user_add_points = user_add_points
+ output$user_y_variable_summary_type = user_y_variable_summary_type
+ output$user_title_position = user_title_position
+
+ output$user_size_of_plot_title = user_size_of_plot_title
+ output$user_axis_title_size = user_axis_title_size
+ output$user_facet_title_size = user_facet_title_size
+ output$user_axis_text_size = user_axis_text_size
+ output$user_data_label_size = user_data_label_size
+ output$user_x_axis_text_angle = user_x_axis_text_angle
+ output$user_legend_title = user_legend_title
+ output$user_stacked = user_stacked
+ output$user_add_density = user_add_density
+ output$user_remove_histogram = user_remove_histogram
+ output$user_select_color_variable_single = user_select_color_variable_single
+ output$user_select_color_parlet = user_select_color_parlet
+ output$user_numeric_summary = user_numeric_summary
+ output$user_tab_more_out = user_tab_more_out
+ output$user_graph_more_out = user_tab_more_out
+
+ output$bivariate_header_label = bivariate_header_label
+ output$corrplot_header_label = corrplot_header_label
+
+ output$user_select_bivariate_single_color = user_select_bivariate_single_color
+ output$user_select_color_parlet_bivariate = user_select_color_parlet_bivariate
+ output$user_select_color_parlet_corrplot = user_select_color_parlet_corrplot
+ output$bivariate_plot_title = bivariate_plot_title
+ output$corrplot_title = corrplot_title
+ output$user_download_autoreport = user_download_autoreport
+ output$user_generatebivriate = user_generatebivriate
+
+
+
+ ##### ---- Explore data actions ----------------------------------
+ explore_data_actions_server()
+
+ ##### ---- Filter data --------------------------------------------
+ explore_data_filter_server()
+ explore_data_apply_filter_server()
+ explore_data_current_filter_server()
+
+ ##### ---- Show/display -------------------------------------------------------
+ explore_show_data_server()
+ explore_data_reset_current_filter_server()
+
+ ##### ---- Compute proportion of missing data ---------------------------
+ explore_missing_data_server()
+
+ ##### ---- Select variables ---------------------------------------------
+ explore_data_select_variables_server()
+ explore_data_selected_variables_server()
+
+ ##### ---- Update data -----------------------------------------------
+ explore_data_update_data_server()
+
+ #### ---- Transform variables -------------------------------------- ####
+ source("server/transform_data.R", local = TRUE)
+
+ ##### ---- Select variables to transform ------------------------------------###
+ transform_data_select_variables_server()
+
+ ##### ---- Change type -----------------------------------------------###
+ transform_data_change_type_server()
+
+ ##### ---- Rename variables -----------------------------------------------###
+ transform_data_rename_variables_server()
+
+ ##### ---- Recode/change value labels ---------------------------------------###
+ transform_data_quick_explore_recode_server()
+
+ ##### ---- Handle missing data ---------------------------------------###
+ transform_data_create_missing_values_server()
+
+ ##### ---- Identify outliers ---------------------------------------###
+ transform_data_identify_outliers_server()
+
+ ##### ---- Handle missing values ---------------------------------------###
+ transform_data_handle_missing_values_server()
+
+ ##### ---- Plot transform data ----------------------------------------------###
+ transform_data_quick_explore_plot_server()
+
+ ##### ---- Plot missing data ----------------------------------------------###
+ transform_data_plot_missing_data_server()
+
+ #### ---- Combine datasets with the existing one --------------------------------------####
+ source("server/combine_data.R", local = TRUE)
+
+ ##### ---- List of internal data ------------------------------------------####
+ combine_data_list_datasets()
+
+ ##### ---- Combine data options ------------------------------------------####
+ combine_data_type()
+
+ ##### ---- Combine data match type ------------------------------------------####
+ combine_data_match_type()
+
+ ##### ---- Combine data variables matched --------------------####
+ combine_data_variable_matching()
+
+ #### ----- Perform matching ---------------------------------####
+ combine_data_perform_variable_match()
+
+ ##### ---- Combine data perform merging --------------------####
+ combine_data_perform_merging()
+
+ #### ---- Reset combine data --------------------------------####
+ combine_data_reset()
+
+ ##### ---- Control Custom visualizations ------------------ #####
+ source("server/user_defined_visualization.R", local = TRUE)
+ user_defined_server()
+
+ ### ------- OMOP ------------------------------------------ #####
+
+ #### ----- Cohort Constructor ---------#####
+ source("server/run_cohort_pipeline.R", local = TRUE)
+ run_cohort_pipeline()
+
+ #### ----- Feature Extraction ---------#####
+ source("server/feature_extraction_pipeline.R", local = TRUE)
+ feature_extraction_pipeline()
+
+ #### ---- Achilles Integration -------------------####
+
+ source("server/run_achilles.R", local = TRUE)
+ achilles_integration_server()
+
+ ### ---- OMOP CDM Summaries---------------------------####
+ source("server/omop_summaries.R", local = TRUE)
+ omopVizServer()
+
+ #### ---- Generate Research Questions --------------------------------------####
+ source("server/research_questions.R", local = TRUE)
+ generate_research_questions_choices()
+
+
+ ##### ---- API Token ------------------ ####
+ generate_research_questions_api_token()
+ generate_research_questions_api_store()
+
+ #### ---- Additional prompts --------------- ####
+ generate_research_questions_additional()
+
+ #### ---- Generate insights using Gemini --------------- ####
+ generate_research_questions_gemini()
+
+ #### ---- Machine learning and AI --------------- ####
+
+ ##### ----- Set ML/AI UI ------------------- ####
+ source("server/setup_models.R", local=TRUE)
+ setup_models_ui()
+
+ ##### ----- Preprocessing ------------------- ####
+ source("server/feature_engineering.R", local=TRUE)
+
+ #### Preprocessing ------------------------------------------- ####
+ feature_engineering_perform_preprocess_server()
+
+ #### ------ Missing value imputation -------------------------- ####
+ feature_engineering_recipe_server()
+ feature_engineering_impute_missing_server()
+
+ #### ----- Modelling framework --------------------------------- ####
+
+ source("server/modelling_framework.R", local=TRUE)
+ modelling_framework_choices()
+
+ #### ----- Model setup ----------------------------------------- ####
+ source("server/model_training_setup.R", local=TRUE)
+ model_training_setup_server()
+
+ #### ----- Caret models --------------------------------------- ####
+ source("server/model_training_caret_models.R", local=TRUE)
+
+ ## LM/GLM
+ model_training_caret_models_ols_server()
+
+ ## RF
+ model_training_caret_models_rf_server()
+
+ ## GBM
+ model_training_caret_models_gbm_server()
+
+ ## xgbTree
+ model_training_caret_models_xgbTree_server()
+
+ ## xgbLinear
+ model_training_caret_models_xgbLinear_server()
+
+ ## svmRadial
+ model_training_caret_models_svmRadial_server()
+
+ ## svmLinear
+ model_training_caret_models_svmLinear_server()
+
+ ## svmPoly
+ model_training_caret_models_svmPoly_server()
+
+ ## glmnet
+ model_training_caret_models_glmnet_server()
+
+ ## LASSO
+ model_training_caret_models_lasso_server()
+
+ ## Ridge
+ model_training_caret_models_ridge_server()
+
+ ## KNN
+ model_training_caret_models_knn_server()
+
+ ## NNET
+ model_training_caret_models_nnet_server()
+
+ ## TREEBAG
+ model_training_caret_models_treebag_server()
+
+ ## avNNet
+ model_training_caret_models_avNNet_server()
+
+ ## PLS
+ model_training_caret_models_pls_server()
+
+ ## GAM
+ model_training_caret_models_gam_server()
+
+ #### ----- Train all models ----------------------------------- ####
+ source("server/train_caret_models.R", local=TRUE)
+ model_training_caret_train_all_server()
+
+ #### ----- Compare trained models ------------------------------ ####
+ source("server/compare_trained_caret_models.R", local=TRUE)
+ model_training_caret_train_metrics_server()
+
+ #### ----- Deploy trained models ------------------------------- ####
+ source("server/deploy_trained_caret_models.R", local=TRUE)
+ deploy_trained_caret_models()
+
+ #### ---- Predict using no-code models ------------------------ ####
+ source("server/predict_trained_caret_models.R", local=TRUE)
+ predict_trained_caret_models()
+
+ #### ---- PyCaret Integration (API) ----------------------------------------------------
+
+ # New ADD
+ rv_ml_ai <- rv_ml_ai %||% reactiveValues(target = NULL, outcome = NULL)
+ rv_current <- rv_current %||% reactiveValues(target = NULL)
+
+ deployment_server(id="deploy",rv_ml_ai=rv_ml_ai,rv_current = rv_current,api_base=api_base)
+ predict_pycaret_server("predict_pycaret", api_base , rv_current, rv_ml_ai)
+
+ # END NEW ADD
+ #### ---- Call current dataset for FastAPI ---------------------------------------------------
+ source("server/automl_server.R", local=TRUE)
+ automl_server("automl_module", rv_current, rv_ml_ai)
+
+ observe({
+ req(!is.null(rv_ml_ai$modelling_framework)) # Check if value exists
+
+ if (tolower(rv_ml_ai$modelling_framework) == "pycaret") {
+ output$automl_module_ui <- renderUI({
+ automl_ui("automl_module")
+ })
+ } else {
+ output$automl_module_ui <- renderUI({
+ h4("")
+ })
+ }
+ })
+
+ observeEvent(input$modelling_framework_choices, {
+ rv_ml_ai$framework <- tolower(input$modelling_framework_choices %||% "")
+ }, ignoreInit = FALSE)
+
+ #### ---- Deep Learning Server ----- ###
+ source("server/deep_learning.R", local=TRUE)
+ deep_learning()
+
+ #### ---- Reset various components --------------------------------------####
+ ## Various components come before this
+ source("server/resets.R", local = TRUE)
+
+ ##### ---- Reset on delete or language change ------------------- ####
+ reset_data_server()
+
+ #### ---- Activate required fields --------------------------------------####
+ iv$enable()
+ iv_url$enable()
+ iv_ml$enable()
+
+ waiter_hide()
}, ignoreInit = FALSE)
-
+
waiter_hide()
}
diff --git a/server/anon/server_module_quant.R b/server/anon/server_module_quant.R
new file mode 100644
index 0000000..23d55b9
--- /dev/null
+++ b/server/anon/server_module_quant.R
@@ -0,0 +1,1687 @@
+# server_module_quant.R
+
+anon_quant_server_logic <- function(input, output, session, rv_current = NULL) {
+
+ # ======================================================================
+ # Quant Anon — Module-safe server with coordinate-safe risk + QGIS export
+ # ======================================================================
+
  # Allow uploads up to 1 GiB (the optional file-upload path below).
  options(shiny.maxRequestSize = 1024^3)

  # ---- DEBUG: show full stack traces inside Shiny ----
  options(shiny.fullstacktrace = TRUE)

  # On error, print the richest trace available: rlang::last_trace() when
  # rlang is installed, plus the base traceback either way.
  if (requireNamespace("rlang", quietly = TRUE)) {
    options(error = function() {
      message("\n--- ERROR TRACE (rlang::last_trace) ---\n")
      try(print(rlang::last_trace()), silent = TRUE)
      message("\n--- BASE TRACEBACK() ---\n")
      traceback(2)
    })
  } else {
    options(error = function() {
      message("\n--- BASE TRACEBACK() ---\n")
      traceback(2)
    })
  }

  ns <- session$ns # IMPORTANT: used for dynamic UI ids created in renderUI()

  # Null-coalescing helper: returns b when a is NULL, otherwise a.
  `%||%` <- function(a, b) if (is.null(a)) b else a

  # CRITICAL FIX: avoid htmlwidgets::validate masking shiny::validate
  vld <- shiny::validate
  need <- shiny::need
+
+ get_after_col <- function(q, df_after) {
+ anon_col <- paste0(q, "_anon")
+ if (anon_col %in% names(df_after)) anon_col else q
+ }
+
  # (IDs kept harmlessly for backwards-compat; landing is removed in UI)
  landing_dom <- ns("landing")
  dashboard_dom <- ns("dashboard")

  # NOTE(review): shinyjs::inlineCSS() builds a style tag and is normally
  # called from the UI definition; confirm this call actually injects CSS
  # when invoked from server code — it may be a no-op here.
  shinyjs::inlineCSS("
    .gauge-value { display: none !important; }
    #preview-table { padding-right: 0 !important; }
  ")
+
+ # ---------- Helpers -----------------------------------------------------
+ parse_ranges_text <- function(text) {
+ entries <- unlist(strsplit(text, ";", fixed = TRUE))
+ parsed <- lapply(entries, function(e) {
+ e <- trimws(e)
+ if (e == "") return(NULL)
+
+ parts <- strsplit(e, ":", fixed = TRUE)[[1]]
+ if (length(parts) != 2) stop(sprintf("Invalid entry '%s': must be of form lower-upper:label", e))
+
+ range_part <- trimws(parts[1])
+ label <- trimws(parts[2])
+ if (label == "") stop(sprintf("Label missing in entry '%s'", e))
+
+ bounds <- strsplit(range_part, "-", fixed = TRUE)[[1]]
+ if (length(bounds) == 1) {
+ num <- as.numeric(bounds[1])
+ if (is.na(num)) stop(sprintf("Non-numeric bound in '%s'", range_part))
+ lower <- num
+ upper <- num
+ } else if (length(bounds) == 2) {
+ lower_raw <- trimws(bounds[1])
+ upper_raw <- trimws(bounds[2])
+ lower <- if (lower_raw == "" || lower_raw == "-Inf") -Inf else as.numeric(lower_raw)
+ upper <- if (upper_raw == "" || upper_raw == "Inf") Inf else as.numeric(upper_raw)
+ if (is.na(lower) || is.na(upper)) stop(sprintf("Non-numeric bound in '%s'", range_part))
+ } else {
+ stop(sprintf("Range part '%s' is malformed", range_part))
+ }
+
+ if (lower > upper) stop(sprintf("Range '%s' has lower > upper", label))
+ list(lower = lower, upper = upper, label = label)
+ })
+
+ parsed <- Filter(Negate(is.null), parsed)
+ if (length(parsed) == 0) stop("No valid ranges found.")
+
+ df <- do.call(rbind, lapply(parsed, function(p) {
+ data.frame(lower = p$lower, upper = p$upper, label = p$label, stringsAsFactors = FALSE)
+ }))
+
+ df <- df[order(df$lower, df$upper), ]
+ if (nrow(df) > 1) {
+ for (i in seq_len(nrow(df) - 1)) {
+ if (df$upper[i] >= df$lower[i + 1]) {
+ stop(sprintf(
+ "Ranges '%s' and '%s' overlap or touch; make upper of one < lower of next.",
+ df$label[i], df$label[i + 1]
+ ))
+ }
+ }
+ }
+ df
+ }
+
+ is_finite_bbox <- function(bbox) {
+ if (is.null(bbox)) return(FALSE)
+ all(is.finite(unname(as.numeric(bbox))))
+ }
+
+ nz_pos <- function(x, default = 100) {
+ x2 <- suppressWarnings(as.numeric(x))
+ if (length(x2) == 0 || is.na(x2) || !is.finite(x2) || x2 <= 0) default else x2
+ }
+
+ is_geo_name <- function(nm) {
+ tolower(nm) %in% c("lat", "latitude", "lon", "long", "longitude", "geom", "geometry",
+ "wkt", "cell_wkt", "polygon")
+ }
+
+ risk_qids <- function(qids) {
+ qids[!sapply(qids, is_geo_name)]
+ }
+
+ drop_geo_cols <- function(dat) {
+ drop <- intersect(names(dat), names(dat)[sapply(names(dat), is_geo_name)])
+ drop <- setdiff(drop, c("cell_wkt", "wkt", "polygon"))
+ dat[, setdiff(names(dat), drop), drop = FALSE]
+ }
+
+ # ----- CRS safety layer -------------------------------------------------
+ is_valid_utm_epsg <- function(epsg) {
+ if (is.na(epsg)) return(FALSE)
+ epsg %in% c(3857, 4326) ||
+ (epsg >= 32601 && epsg <= 32660) ||
+ (epsg >= 32701 && epsg <= 32760)
+ }
+
+ guess_utm_epsg <- function(lon, lat) {
+ lon <- lon[is.finite(lon)]
+ lat <- lat[is.finite(lat)]
+ if (!length(lon) || !length(lat)) return(3857)
+ zone <- floor((mean(lon) + 180) / 6) + 1
+ epsg <- if (mean(lat) >= 0) 32600 + zone else 32700 + zone
+ if (!is_valid_utm_epsg(epsg)) 3857 else epsg
+ }
+
  # Reproject `obj` to `target_epsg` with graceful degradation:
  #   1. sf objects with no CRS are assumed to be WGS84 (EPSG:4326);
  #   2. an unrecognised target EPSG is replaced with Web Mercator (3857);
  #   3. if the transform fails, retry with 4326, then 3857; if every
  #      attempt fails, return the object unchanged rather than erroring.
  safe_transform <- function(obj, target_epsg) {
    if (inherits(obj, "sf") && is.na(sf::st_crs(obj))) {
      suppressWarnings(sf::st_crs(obj) <- 4326)
    }
    if (!is_valid_utm_epsg(target_epsg)) target_epsg <- 3857

    out <- try(suppressWarnings(sf::st_transform(obj, target_epsg)), silent = TRUE)
    if (inherits(out, "try-error")) {
      # Fallback chain: WGS84 first, then Web Mercator.
      out <- try(suppressWarnings(sf::st_transform(obj, 4326)), silent = TRUE)
      if (inherits(out, "try-error")) {
        out <- try(suppressWarnings(sf::st_transform(obj, 3857)), silent = TRUE)
        if (inherits(out, "try-error")) return(obj)
      }
    }
    out
  }
+
  # Aggregate projected points onto a square grid.
  #
  # pts_utm: sf POINT layer in a projected CRS (assigned 3857 if missing).
  # cell_m:  grid cell edge length in CRS units (sanitised via nz_pos).
  #
  # Returns an sf polygon layer of the occupied grid cells with a point
  # count column `n`.  Every failure path (empty bbox, grid or intersection
  # failure, no occupied cells) returns an EMPTY sf layer in the input CRS
  # rather than raising, so callers only need to test nrow().
  build_grid_agg <- function(pts_utm, cell_m) {
    if (!inherits(pts_utm, "sf")) stop("pts_utm must be sf")
    if (!inherits(sf::st_geometry(pts_utm), "sfc_POINT")) stop("pts_utm must be POINT geometries")
    if (is.na(sf::st_crs(pts_utm))) sf::st_crs(pts_utm) <- 3857

    cell_m <- nz_pos(cell_m, default = 100)

    bbox <- suppressWarnings(sf::st_bbox(pts_utm))
    if (!is_finite_bbox(bbox)) {
      return(sf::st_sf(geometry = sf::st_sfc(), n = integer(0), crs = sf::st_crs(pts_utm)))
    }

    grd <- try(sf::st_make_grid(pts_utm, cellsize = c(cell_m, cell_m), square = TRUE), silent = TRUE)
    if (inherits(grd, "try-error") || length(grd) == 0) {
      return(sf::st_sf(geometry = sf::st_sfc(), n = integer(0), crs = sf::st_crs(pts_utm)))
    }

    grd_sf <- sf::st_sf(geometry = grd)
    sf::st_crs(grd_sf) <- sf::st_crs(pts_utm)

    ints <- try(sf::st_intersects(grd_sf, pts_utm), silent = TRUE)
    if (inherits(ints, "try-error")) {
      return(sf::st_sf(geometry = sf::st_sfc(), n = integer(0), crs = sf::st_crs(pts_utm)))
    }

    # Count points per cell, keep occupied cells only, then clean the
    # geometries (validity, Z/M dimensions, empties) for export.
    grd_sf$n <- lengths(ints)
    grd_sf <- grd_sf[grd_sf$n > 0, , drop = FALSE]
    grd_sf <- sf::st_make_valid(grd_sf)
    grd_sf <- sf::st_zm(grd_sf, drop = TRUE, what = "ZM")
    if (nrow(grd_sf)) grd_sf <- grd_sf[!sf::st_is_empty(grd_sf), , drop = FALSE]

    if (!nrow(grd_sf)) {
      return(sf::st_sf(geometry = sf::st_sfc(), n = integer(0), crs = sf::st_crs(pts_utm)))
    }
    grd_sf
  }
+
  # Disable s2 spherical geometry: the grid aggregation above relies on
  # planar operations.
  if (requireNamespace("sf", quietly = TRUE)) {
    sf::sf_use_s2(FALSE)
  }

  # ---------- Reactives ---------------------------------------------------
  # Datasets: raw load, working copy, anonymised output.
  raw_data <- reactiveVal()
  data <- reactiveVal()
  anon_data <- reactiveVal()
  # QID bookkeeping.
  anonymized_cols <- reactiveVal(character())
  initial_qids <- reactiveVal(character())
  # Generated code snippets per language (import header + appended steps).
  import_snip <- reactiveVal(character())
  import_stata_snip <- reactiveVal(character())
  import_py_snip <- reactiveVal(character())
  r_steps <- reactiveVal(character())
  stata_steps <- reactiveVal(character())
  python_steps <- reactiveVal(character())
  # Undo stacks (one snapshot per applied anonymisation step).
  previous_data_stack <- reactiveVal(list())
  previous_r_stack <- reactiveVal(list())
  previous_stata_stack <- reactiveVal(list())
  previous_py_stack <- reactiveVal(list())
  # Audit log, k-anonymity bins, generalization assignments, misc state.
  log_steps <- reactiveVal(character())
  k_bins <- reactiveVal(list())
  gen_assigned <- reactiveVal(list())
  previous_gen_assigned_stack <- reactiveVal(list())
  last_method <- reactiveVal(NULL)
  report_temp <- reactiveVal(NULL)

  # Object names used inside generated R / Python snippets.
  r_obj_name <- reactiveVal(NULL)
  py_obj_name <- reactiveVal(NULL)

  # Geo layers + chosen coordinate columns for the map preview.
  geo_preview_layer <- reactiveVal(NULL)
  geo_after_layer <- reactiveVal(NULL)
  geo_lat_col <- reactiveVal(NULL)
  geo_lon_col <- reactiveVal(NULL)

  # Perf: cache risk metrics so they don't recompute many times
  risk_before_metrics <- reactiveVal(NULL)
  risk_after_metrics <- reactiveVal(NULL)

  # ======================================================================
  # Landing removed: NO continue/manual handlers here
  # (Descriptions tab still works via output$descriptions_panel below)
  # ======================================================================

  # ======================================================================
  # Guard against missing input$method/input$selected_qids
  # ======================================================================
  safe_method <- reactive(input$method %||% "Masking")
  safe_selected_qids <- reactive(input$selected_qids %||% character())
+
+ # ======================================================================
+ # NEW: Load data from MAIN PLATFORM (rv_current) instead of upload
+ # UI must provide actionButton(ns("use_platform_data"), ...)
+ # ======================================================================
+ get_platform_df <- function() {
+ if (is.null(rv_current)) return(NULL)
+ # Try common locations used in your app
+ df <- rv_current$working_df %||% rv_current$data %||% rv_current$df %||% NULL
+ if (is.null(df)) return(NULL)
+ tryCatch(as.data.frame(df, stringsAsFactors = FALSE), error = function(e) NULL)
+ }
+
  # Reset every piece of module state around a freshly-loaded data frame and
  # seed the code panes with "data came from the platform" notes.
  # df:           the new working data frame.
  # source_label: basis for the object names used in generated R/Python code.
  init_state_from_df <- function(df, source_label = "platform_dataset") {
    # object names for code panes
    r_obj_name(make.names(source_label))
    py_obj_name(gsub("[^A-Za-z0-9_]", "_", source_label))

    raw_data(df)
    data(df)
    anon_data(NULL)

    anonymized_cols(character())
    initial_qids(character())

    # Clear undo stacks.
    previous_data_stack(list())
    previous_r_stack(list())
    previous_stata_stack(list())
    previous_py_stack(list())

    log_steps(character())
    k_bins(list())

    gen_assigned(list())
    previous_gen_assigned_stack(list())

    # Clear geo layers and cached risk metrics from any previous dataset.
    geo_preview_layer(NULL)
    geo_after_layer(NULL)
    geo_lat_col(NULL)
    geo_lon_col(NULL)

    risk_before_metrics(NULL)
    risk_after_metrics(NULL)

    # Code panes: platform load note
    import_snip(c(
      "# Using dataset already loaded in the main platform",
      "library(dplyr)",
      "# (No file import step needed here)"
    ))
    r_steps(import_snip())

    import_stata_snip(c(
      "// Using dataset already loaded in the main platform",
      "// (No file import step needed here)"
    ))
    stata_steps(import_stata_snip())

    import_py_snip(c(
      "# Using dataset already loaded in the main platform",
      "# (No file import step needed here)"
    ))
    python_steps(import_py_snip())

    # Push the fresh snippets into the three Ace editors.
    shinyAce::updateAceEditor(session, "r_code_ace", value = paste(r_steps(), collapse = "\n\n"))
    shinyAce::updateAceEditor(session, "stata_code_ace", value = paste(stata_steps(), collapse = "\n\n"))
    shinyAce::updateAceEditor(session, "python_code_ace", value = paste(python_steps(), collapse = "\n\n"))
  }

  # "Use platform data" button: pull the main app's dataset into this module.
  observeEvent(input$use_platform_data, {
    df <- get_platform_df()
    if (is.null(df) || !nrow(df)) {
      showNotification("No dataset found in the main platform. Upload/select data there first.", type = "error")
      return()
    }
    init_state_from_df(df, source_label = "platform_dataset")
    showNotification("Dataset loaded from the main platform.", type = "message")
  }, ignoreInit = TRUE)
+
+ # ======================================================================
+ # (Optional) Keep your existing upload behavior — only runs if UI has fileInput
+ # ======================================================================
  # Load a CSV/XLSX/DTA upload, reset all per-dataset state, and seed the
  # R/Stata/Python code panes with the matching import snippet.
  observeEvent(input$file, {
    req(input$file)

    fname <- input$file$name
    ext <- tools::file_ext(fname)
    dname_raw <- tools::file_path_sans_ext(fname)

    # Object names used in the generated code snippets (R- and Python-safe).
    r_name <- make.names(dname_raw)
    py_name <- gsub("[^A-Za-z0-9_]", "_", dname_raw)
    r_obj_name(r_name)
    py_obj_name(py_name)

    # Unsupported extensions hit the default branch and abort the handler.
    df <- switch(
      tolower(ext),
      csv = data.table::fread(input$file$datapath, data.table = FALSE, showProgress = FALSE),
      xlsx = readxl::read_excel(input$file$datapath),
      dta = haven::read_dta(input$file$datapath),
      { showNotification("Unsupported file type.", type = "error"); return() }
    )

    # Fresh dataset: clear all derived reactive state (anonymised copy,
    # QIDs, undo stacks, logs, geo layers, cached risk metrics).
    raw_data(df)
    data(df)
    anon_data(NULL)

    anonymized_cols(character())
    initial_qids(character())

    previous_data_stack(list())
    previous_r_stack(list())
    previous_stata_stack(list())
    previous_py_stack(list())

    log_steps(character())
    k_bins(list())

    gen_assigned(list())
    previous_gen_assigned_stack(list())

    geo_preview_layer(NULL)
    geo_after_layer(NULL)
    geo_lat_col(NULL)
    geo_lon_col(NULL)

    risk_before_metrics(NULL)
    risk_after_metrics(NULL)

    # Build the language-specific "load data" snippets for the editors.
    r_pkg_base <- c("library(dplyr)", "library(sdcMicro)")
    r_pkg_read <- if (tolower(ext) == "xlsx") "library(readxl)" else if (tolower(ext) == "dta") "library(haven)" else NULL

    r_read <- if (tolower(ext) == "csv") {
      sprintf("%s <- read.csv(%s, stringsAsFactors = FALSE)", r_name, shQuote(fname))
    } else if (tolower(ext) == "xlsx") {
      sprintf("%s <- read_excel(%s)", r_name, shQuote(fname))
    } else {
      sprintf("%s <- read_dta(%s)", r_name, shQuote(fname))
    }

    import_snip(c("# Load data", r_pkg_base, r_pkg_read, r_read))
    r_steps(import_snip())

    stata_snip <- switch(
      tolower(ext),
      csv = sprintf("import delimited %s, clear", shQuote(fname)),
      xlsx = sprintf("import excel %s, clear", shQuote(fname)),
      dta = sprintf("use %s, clear", shQuote(fname))
    )

    import_stata_snip(c("// Load data", stata_snip))
    stata_steps(import_stata_snip())

    python_snip <- switch(
      tolower(ext),
      csv = c("# Load data", "import pandas as pd", sprintf("%s = pd.read_csv(%s)", py_name, shQuote(fname))),
      xlsx = c("# Load data", "import pandas as pd", sprintf("%s = pd.read_excel(%s)", py_name, shQuote(fname))),
      dta = c("# Load data", "import pandas as pd", "import pyreadstat",
              sprintf("%s, meta = pyreadstat.read_dta(%s)", py_name, shQuote(fname)))
    )

    import_py_snip(python_snip)
    python_steps(import_py_snip())

    # Push the assembled snippets into the three Ace editors.
    shinyAce::updateAceEditor(session, "r_code_ace", value = paste(r_steps(), collapse = "\n\n"))
    shinyAce::updateAceEditor(session, "stata_code_ace", value = paste(stata_steps(), collapse = "\n\n"))
    shinyAce::updateAceEditor(session, "python_code_ace", value = paste(python_steps(), collapse = "\n\n"))
  })
+
  # ---------- Bin-Size Advisor -------------------------------------------
  # Keep the advisor's variable picker in sync with the numeric columns of
  # the current dataset.
  observe({
    req(data())
    nums <- names(data())[sapply(data(), is.numeric)]
    updateSelectInput(
      session, "advisor_var",
      choices = nums,
      selected = if (length(nums) > 0) nums[1] else NULL
    )
  })

  # Selected advisor variable with missing values removed.
  advisor_x <- reactive({
    req(data(), input$advisor_var)
    na.omit(data()[[input$advisor_var]])
  })

  # Raw distribution of the selected variable.
  output$advisor_dist <- renderPlot({
    req(advisor_x())
    x <- advisor_x()
    hist(
      x,
      main = paste("Histogram of", input$advisor_var),
      xlab = input$advisor_var,
      border = "white"
    )
  })

  # Location/spread summary of the selected variable.
  output$advisor_summary <- renderPrint({
    req(advisor_x())
    x <- advisor_x()
    stats <- c(
      Mean = mean(x),
      Median = median(x),
      IQR = IQR(x),
      SD = sd(x),
      Min = min(x),
      Max = max(x)
    )
    print(stats)
  })

  # Recommended bin widths/counts from four classic rules, computed only on
  # the button click.  FD and Scott are NA when the IQR / SD is zero.
  bin_advice <- eventReactive(input$advisor_run, {
    req(advisor_x())
    x <- advisor_x()
    n <- length(x)
    rng <- range(x)
    range_x <- diff(rng)
    iqr_x <- IQR(x)
    sd_x <- sd(x)

    # Sturges: k = ceil(log2(n) + 1).
    k_sturges <- ceiling(log2(n) + 1)
    w_sturges <- range_x / k_sturges

    # Freedman–Diaconis: w = 2 * IQR / n^(1/3).
    w_fd <- if (iqr_x > 0) 2 * iqr_x / (n^(1 / 3)) else NA
    k_fd <- if (!is.na(w_fd) && w_fd > 0) ceiling(range_x / w_fd) else NA

    # Scott: w = 3.5 * SD / n^(1/3).
    w_scott <- if (sd_x > 0) 3.5 * sd_x / (n^(1 / 3)) else NA
    k_scott <- if (!is.na(w_scott) && w_scott > 0) ceiling(range_x / w_scott) else NA

    # Square-root rule: k = ceil(sqrt(n)).
    k_sqrt <- ceiling(sqrt(n))
    w_sqrt <- range_x / k_sqrt

    data.frame(
      Method = c("Sturges", "Freedman–Diaconis", "Scott", "Square root"),
      Bin_Width = c(w_sturges, w_fd, w_scott, w_sqrt),
      Num_Bins = c(k_sturges, k_fd, k_scott, k_sqrt),
      stringsAsFactors = FALSE
    )
  })

  # Advice table, widths rounded for display only.
  output$advisor_table <- renderTable({
    df <- bin_advice()
    df$Bin_Width <- round(df$Bin_Width, 2)
    df
  }, striped = TRUE, hover = TRUE, spacing = "l")

  # Side-by-side histograms, one per rule; rules with invalid widths are
  # skipped.
  output$advisor_plot <- renderPlot({
    df <- bin_advice()
    req(nrow(df) > 0)
    x <- advisor_x()

    # Restore graphics parameters on exit.
    old_par <- par(no.readonly = TRUE)
    on.exit(par(old_par), add = TRUE)

    rows <- ceiling(nrow(df) / 2)
    cols <- 2
    par(mfrow = c(rows, cols), mar = c(4, 4, 2, 1))

    for (i in seq_len(nrow(df))) {
      bw <- df$Bin_Width[i]
      if (!is.finite(bw) || bw <= 0) next
      brks <- seq(min(x, na.rm = TRUE), max(x, na.rm = TRUE) + bw, by = bw)
      hist(x, breaks = brks, main = df$Method[i], xlab = input$advisor_var, border = "white")
    }
  })
+
+ # ---------- Risk metric calculators (cached) ----------------------------
+ calc_risk_metrics <- function(df, cols) {
+ if (!nrow(df) || !length(cols)) return(NULL)
+ tbl <- df %>%
+ dplyr::group_by(dplyr::across(dplyr::all_of(cols))) %>%
+ dplyr::summarise(n = dplyr::n(), .groups = "drop") %>%
+ dplyr::mutate(r = 1 / n)
+ list(
+ avg = mean(tbl$r),
+ max = max(tbl$r),
+ pct_unique = mean(tbl$n == 1)
+ )
+ }
+
  # Recompute "before" risk whenever the working data or the QID selection
  # changes (coordinate columns are excluded from the QID set).
  observeEvent(list(data(), initial_qids()), {
    req(data(), initial_qids())
    q <- risk_qids(initial_qids())
    if (!length(q)) { risk_before_metrics(NULL); return() }
    risk_before_metrics(calc_risk_metrics(data(), q))
  }, ignoreInit = TRUE)

  # Recompute "after" risk on the anonymised data, mapping each QID to its
  # "_anon" companion column when one exists.
  observeEvent(list(anon_data(), initial_qids()), {
    req(anon_data(), initial_qids())
    q <- risk_qids(initial_qids())
    if (!length(q)) { risk_after_metrics(NULL); return() }
    df_a <- anon_data()
    after_cols <- sapply(q, get_after_col, df_after = df_a, USE.NAMES = FALSE)
    risk_after_metrics(calc_risk_metrics(df_a, after_cols))
  }, ignoreInit = TRUE)

  # ---------- Risk AFTER (coords excluded) --------------------------------
  # One-line textual summary of the post-anonymisation risk metrics.
  output$risk_after <- renderUI({
    req(anon_data(), initial_qids())
    q <- risk_qids(initial_qids())
    vld(need(length(q) > 0, "Select non-coordinate QIDs."))

    m <- risk_after_metrics()
    req(m)

    tags$p(sprintf(
      "After: Average Risk: %.4f; Maximum Risk: %.4f; Percentage Unique: %.4f%%",
      m$avg, m$max, m$pct_unique * 100
    ))
  })

  # Gauge of the average post-anonymisation risk as a percentage
  # (green < 20, amber 20-50, red > 50).
  output$gauge_after <- flexdashboard::renderGauge({
    req(anon_data(), initial_qids())
    q <- risk_qids(initial_qids())
    vld(need(length(q) > 0, "Select non-coordinate QIDs."))

    m <- risk_after_metrics()
    req(m)

    pct <- round(m$avg * 100, 2)
    flexdashboard::gauge(
      pct, min = 0, max = 100, symbol = "%",
      sectors = flexdashboard::gaugeSectors(success = c(0, 20), warning = c(20, 50), danger = c(50, 100)),
      label = sprintf("%.2f%%", pct), abbreviate = FALSE
    )
  })

  # Plain-text percentage shown alongside the gauge.
  output$gauge_after_value <- renderText({
    req(anon_data(), initial_qids())
    q <- risk_qids(initial_qids())
    vld(need(length(q) > 0, "Select non-coordinate QIDs."))

    m <- risk_after_metrics()
    req(m)

    sprintf("%.2f%%", round(m$avg * 100, 2))
  })
+
  # ---------- Descriptions (TRANSLATABLE) ------------------------------------
  # Resolve document paths relative to the app directory, falling back to the
  # working directory when Shiny does not expose appDir.
  app_dir <- shiny::getShinyOption("appDir")
  if (is.null(app_dir) || !nzchar(app_dir)) app_dir <- getwd()

  # Rmd source of the descriptions document.
  manual2_rmd <- normalizePath(
    file.path(app_dir, "server", "anon", "docs", "descriptions1.Rmd"),
    mustWork = FALSE
  )

  # Pre-rendered HTML fallback for the descriptions document.
  manual2_html <- normalizePath(
    file.path(app_dir, "server", "anon", "www", "descriptions1.html"),
    mustWork = FALSE
  )


  # Point to the SAME excel file your get_rv_labels() uses
  labels_xlsx <- normalizePath(
    file.path(app_dir, "labelling_file_with_manual_full.xlsx"),
    mustWork = FALSE
  )

  # ---- IMPORTANT: define a *reactive* language getter used by the rest of the app ----
  # Checks input$change_language, then input$language, then defaults to
  # "English".  Replace the probing order with the app's real language
  # source if it differs (e.g. rv_labels$language / rv_translation$lang).
  get_current_language <- reactive({
    # Fallback chain is safe even when neither input exists.
    lang <- NULL
    if (!is.null(input$change_language) && nzchar(input$change_language)) lang <- input$change_language
    if (!is.null(input$language) && nzchar(input$language)) lang <- input$language
    if (is.null(lang)) lang <- "English"

    lang
  })
+
  # Language-aware Descriptions tab: re-knits the Rmd with the current
  # language and the labels workbook, then embeds the result in an iframe.
  #
  # NOTE(review): output$descriptions_panel is assigned AGAIN further below;
  # Shiny keeps only the last assignment, so this language-aware version is
  # currently shadowed (dead code).  Remove one of the two definitions.
  output$descriptions_panel <- renderUI({
    lang <- get_current_language() # <- THIS makes it reactive to language changes
    req(file.exists(manual2_rmd))
    req(file.exists(labels_xlsx))

    out_dir <- tempdir()

    # Create a language-specific output name so it doesn't reuse the old HTML
    out_file <- file.path(out_dir, paste0("descriptions1_", lang, ".html"))

    ok <- TRUE
    tryCatch({
      rmarkdown::render(
        input = manual2_rmd,
        output_file = basename(out_file),
        output_dir = out_dir,
        output_format = "html_document",
        params = list(
          language = lang,
          labels_path = labels_xlsx
        ),
        envir = new.env(parent = globalenv()),
        quiet = TRUE
      )
    }, error = function(e) {
      ok <<- FALSE
      showNotification(paste("Descriptions render error:", e$message), type = "error")
    })

    if (!ok) return(NULL)

    # Resource path must be unique per language (and optionally per render)
    res_name_tmp <- paste0("anon_docs_tmp_", gsub("[^A-Za-z0-9]", "_", lang), "_", session$token)
    addResourcePath(res_name_tmp, out_dir)

    # Cache-bust so iframe reloads when switching languages
    tags$iframe(
      src = paste0(res_name_tmp, "/", basename(out_file), "?v=", as.integer(Sys.time())),
      style = "width:100%; height:calc(100vh - 150px); border:none;"
    )
  })
+
+
+
+ output$descriptions_panel <- renderUI({
+ res_name_html <- paste0("manuals2_", gsub("[^A-Za-z0-9]", "_", landing_dom))
+ res_name_tmp <- paste0("manuals2_tmp_", gsub("[^A-Za-z0-9]", "_", landing_dom))
+
+ if (file.exists(manual2_html)) {
+ addResourcePath(res_name_html, normalizePath(file.path("..", "Anon", "www"), mustWork = TRUE))
+ tags$iframe(
+ src = paste0(res_name_html, "/", basename(manual2_html)),
+ style = "width:100%; height:calc(100vh - 150px); border:none;"
+ )
+
+ } else if (file.exists(manual2_rmd)) {
+ out_dir <- tempdir()
+ out_file <- file.path(out_dir, "descriptions1.html")
+ ok <- TRUE
+
+ tryCatch({
+ rmarkdown::render(
+ manual2_rmd,
+ output_file = basename(out_file),
+ output_dir = out_dir,
+ output_format = "html_document",
+ quiet = TRUE
+ )
+ }, error = function(e) {
+ ok <<- FALSE
+ showNotification(paste("Manual render error:", e$message), type = "error")
+ })
+
+ if (!ok) return(NULL)
+ addResourcePath(res_name_tmp, out_dir)
+
+ tags$iframe(
+ src = paste0(res_name_tmp, "/", basename(out_file)),
+ style = "width:100%; height:calc(100vh - 150px); border:none;"
+ )
+
+ } else {
+ tags$div(style = "padding:10px;", "Manual not available.")
+ }
+ })
+
  # ---------- Suppress & Remove Direct Identifiers ------------------------
  # Checkbox list of candidate direct identifiers plus the removal button.
  output$identifier_selector <- renderUI({
    req(data())
    tagList(
      checkboxGroupInput(ns("direct_ids"), "Select Direct Identifiers to Remove:", choices = names(data())),
      actionButton(ns("remove_ids"), "Suppress & Remove Identifiers", class = "btn btn-danger btn-block")
    )
  })

  # Drop the chosen identifier columns, reset derived state, and append the
  # equivalent R/Stata/Python code to the editors (with undo snapshots).
  observeEvent(input$remove_ids, {
    req(data(), input$direct_ids)

    # Snapshot current state onto the undo stacks.
    previous_data_stack(c(previous_data_stack(), list(if (is.null(anon_data())) data() else anon_data())))
    previous_r_stack(c(previous_r_stack(), list(r_steps())))
    previous_stata_stack(c(previous_stata_stack(), list(stata_steps())))
    previous_py_stack(c(previous_py_stack(), list(python_steps())))

    df2 <- data()[, !names(data()) %in% input$direct_ids, drop = FALSE]
    data(df2)
    anon_data(NULL)
    anonymized_cols(character())
    initial_qids(character())

    # Cached metrics are stale after a column drop.
    risk_before_metrics(NULL)
    risk_after_metrics(NULL)

    # Generate matching code snippets for the three code panes.
    cols <- input$direct_ids
    r_name <- r_obj_name() %||% "platform_dataset"
    py_name <- py_obj_name() %||% "platform_dataset"

    code_r <- paste0(
      "# Suppression\nlibrary(dplyr)\n",
      r_name, " <- ", r_name, " %>% dplyr::select(-", paste(cols, collapse = ", -"), ")"
    )
    code_s <- paste0("// Suppression\n", "drop ", paste(cols, collapse = " "))
    code_py <- paste0(
      "# Suppression\nimport pandas as pd\n",
      py_name, ".drop(columns=[", paste(shQuote(cols), collapse = ", "), "], inplace=True)"
    )

    r_steps(c(r_steps(), code_r))
    stata_steps(c(stata_steps(), code_s))
    python_steps(c(python_steps(), code_py))
    log_steps(c(log_steps(), paste(Sys.time(), "- Suppressed IDs")))

    shinyAce::updateAceEditor(session, "r_code_ace", value = paste(r_steps(), collapse = "\n\n"))
    shinyAce::updateAceEditor(session, "stata_code_ace", value = paste(stata_steps(), collapse = "\n\n"))
    shinyAce::updateAceEditor(session, "python_code_ace", value = paste(python_steps(), collapse = "\n\n"))

    showNotification("Identifiers suppressed & removed.", type = "message")
  })
+
  # ---------- QID picker & sync -------------------------------------------
  # Drag-and-drop bucket list: available variables vs selected QIDs.
  output$bucket_ui <- renderUI({
    req(data())
    avail <- setdiff(names(data()), safe_selected_qids())

    sortable::bucket_list(
      header = "Drag variables into QID bucket",
      group_name = ns("qid_group"),
      orientation = "horizontal",
      sortable::add_rank_list("Available Variables", ns("available_vars"), labels = avail),
      sortable::add_rank_list("Selected QIDs", ns("selected_qids"), labels = initial_qids())
    )
  })

  # Mirror the bucket selection into initial_qids (NULL -> empty selection;
  # ignoreNULL = FALSE so clearing the bucket also clears the QIDs).
  observeEvent(input$selected_qids, {
    initial_qids(input$selected_qids %||% character())
  }, ignoreNULL = FALSE)

  # For the Generalization method only: keep the gen_var dropdown limited to
  # the current QIDs, preserving the user's choice while it stays valid.
  observe({
    req(initial_qids())
    if (!isTRUE(safe_method() == "Generalization")) return()

    choices <- initial_qids()
    if (length(choices) == 0) return()

    current <- isolate(input$gen_var)
    if (is.null(current) || !(current %in% choices)) {
      updateSelectInput(session, "gen_var", choices = choices, selected = choices[1])
    } else {
      updateSelectInput(session, "gen_var", choices = choices, selected = current)
    }
  })
+
  # ---------- Technique parameters UI (server side) -----------------------
  # Numeric-QID picker used by the K-Anonymity parameter panel.
  output$k_num_picker <- renderUI({
    req(data(), initial_qids())
    nq <- initial_qids()[sapply(data()[, initial_qids(), drop = FALSE], is.numeric)]
    if (length(nq) == 0) return(helpText("No numeric QIDs to bucket."))
    selectInput(ns("k_bucket_var"), "Numeric QID to bucket:", choices = nq, selected = nq[1])
  })

  # Method-specific parameter widgets; the switch yields NULL (no extra
  # inputs) for any method not listed.
  output$extra_input <- renderUI({
    req(data())
    cols <- names(data())

    switch(
      safe_method(),

      "Masking" = checkboxGroupInput(ns("mask_cols"), "Columns to mask:", choices = cols),
      "Suppression" = checkboxGroupInput(ns("supp_cols"), "Columns to suppress:", choices = cols),
      "Pseudonymization" = checkboxGroupInput(ns("ps_cols"), "Columns to pseudonymize:", choices = cols),
      "Tokenization" = checkboxGroupInput(ns("tok_cols"), "Columns to tokenize:", choices = cols),

      "Bucketing" = tagList(
        selectInput(ns("bucket_col"), "Column to bucket:", choices = cols),
        numericInput(ns("bin_interval"), "Bin size:", value = 4, min = 1)
      ),

      "K-Anonymity" = tagList(
        uiOutput(ns("k_num_picker")),
        numericInput(ns("k_bin_size"), "Bin size:", value = 5, min = 1),
        numericInput(ns("k_value"), "k threshold:", value = 2, min = 2)
      ),

      "L-Diversity" = tagList(
        selectInput(ns("sensitive_attr"), "Sensitive attribute:", choices = cols),
        numericInput(ns("l_value"), "l threshold:", value = 2, min = 2)
      ),

      "T-Closeness" = tagList(
        selectInput(ns("sensitive_attr_tc"), "Sensitive attribute:", choices = cols),
        numericInput(ns("t_threshold"), "t threshold:", value = 0.1, min = 0, step = 0.01)
      ),

      "Generalization" = tagList(
        selectInput(ns("gen_var"), "Variable to generalize:", choices = initial_qids()),
        uiOutput(ns("gen_groups_ui"))
      ),

      # Coordinate anonymisation: truncate decimals or snap to grid cells.
      "Anonymize Coordinates" = tagList(
        selectInput(ns("geo_lat_col"), "Latitude column:", choices = cols),
        selectInput(ns("geo_lon_col"), "Longitude column:", choices = cols),
        radioButtons(
          ns("geo_mode"), "Mode:",
          choices = c("Truncate decimals" = "truncate", "Aggregate to polygons" = "aggregate"),
          selected = "aggregate", inline = TRUE
        ),
        conditionalPanel(
          condition = sprintf("input['%s'] == 'truncate'", ns("geo_mode")),
          numericInput(ns("geo_decimals"), "Keep this many decimals:", value = 3, min = 0, step = 1),
          helpText("Truncation (not rounding): reduces precision to the specified number of decimals.")
        ),
        conditionalPanel(
          condition = sprintf("input['%s'] == 'aggregate'", ns("geo_mode")),
          numericInput(ns("geo_grid_m"), "Grid cell size (meters):", value = 500, min = 10, step = 10),
          helpText("Points are snapped to the grid cell centroid; polygons are drawn on the map.")
        ),
        helpText("Preview: original points + faint preview (for truncate). Apply: dataset updated; polygons drawn.")
      ),

      NULL
    )
  })
+
+ # ---------- Apply anonymization (incl. GEO) --------------------------------
+ observeEvent(input$apply, {
+ req(data(), input$method)
+ if (input$method != "Anonymize Coordinates") req(initial_qids())
+
+ last_method(input$method)
+
+ previous_data_stack(c(previous_data_stack(), list(if (is.null(anon_data())) data() else anon_data())))
+ previous_r_stack(c(previous_r_stack(), list(r_steps())))
+ previous_stata_stack(c(previous_stata_stack(), list(stata_steps())))
+ previous_py_stack(c(previous_py_stack(), list(python_steps())))
+
+ df <- if (is.null(anon_data())) data() else anon_data()
+ code_r <- code_s <- code_py <- NULL
+ step_cols <- NULL
+ r_name <- r_obj_name() %||% "platform_dataset"
+ py_name <- py_obj_name() %||% "platform_dataset"
+
+ if (input$method == "Masking") {
+ step_cols <- input$mask_cols
+ if (length(step_cols) == 0) { showNotification("Select columns to mask.", type="error"); return() }
+
+ df[step_cols] <- lapply(df[step_cols], function(x)
+ ifelse(is.na(x), NA_character_, strrep("*", 10))
+ )
+
+ code_r <- paste0(
+ "# Masking (strings & numerics)\nlibrary(dplyr)\n",
+ r_name, " <- ", r_name, " %>% dplyr::mutate(across(all_of(c(",
+ paste(shQuote(step_cols), collapse = ", "),
+ ")), ~ ifelse(is.na(.), NA_character_, strrep(\"*\", 10))))"
+ )
+
+ code_s <- paste0(
+ "// Masking (strings & numerics)\n",
+ "foreach v of varlist ", paste(step_cols, collapse = " "), " {\n",
+ " capture confirm numeric variable `v'\n",
+ " if !_rc { tostring `v', replace force }\n",
+ " replace `v' = \"**********\" if !missing(`v')\n",
+ "}"
+ )
+
+ code_py <- paste0(
+ "# Masking (strings & numerics)\n",
+ "import pandas as pd\n",
+ "for col in [", paste(shQuote(step_cols), collapse = ", "), "]:\n",
+ " ", py_name, "[col] = ", py_name, "[col].apply(lambda x: None if pd.isna(x) else '*'*10)"
+ )
+
+ } else if (input$method == "Suppression") {
+ step_cols <- input$supp_cols
+ if (length(step_cols) == 0) { showNotification("Select columns to suppress.", type="error"); return() }
+
+ df <- df[, !names(df) %in% step_cols, drop = FALSE]
+
+ code_r <- paste0(
+ "# Suppression\nlibrary(dplyr)\n",
+ r_name, " <- ", r_name, " %>% dplyr::select(-", paste(step_cols, collapse = ", -"), ")"
+ )
+ code_s <- paste0("// Suppression\n", "drop ", paste(step_cols, collapse = " "))
+ code_py <- paste0(
+ "# Suppression\nimport pandas as pd\n",
+ py_name, ".drop(columns=[", paste(shQuote(step_cols), collapse = ", "), "], inplace=True)"
+ )
+
+ } else if (input$method == "Bucketing") {
+ step_cols <- input$bucket_col; b <- input$bin_interval
+ if (is.null(step_cols) || is.null(b)) { showNotification("Choose a column and bin size.", type="error"); return() }
+
+ vals <- df[[step_cols]]
+ if (!is.numeric(vals)) { showNotification("Bucketing requires numeric column.", type = "error"); return() }
+
+ minv <- suppressWarnings(min(vals, na.rm = TRUE)); maxv <- suppressWarnings(max(vals, na.rm = TRUE))
+ if (!is.finite(minv) || !is.finite(maxv)) { showNotification("No finite values to bucket.", type="error"); return() }
+
+ start <- floor(minv / b) * b; end <- ceiling((maxv + 1) / b) * b
+ brks <- seq(start, end, by = b)
+ lbls <- paste0(head(brks, -1), "-", brks[-1] - 1)
+
+ df[[step_cols]] <- cut(vals, breaks = brks, labels = lbls, include.lowest = TRUE, right = FALSE)
+
+ code_r <- paste0(
+ "# Bucketing\nlibrary(dplyr)\n",
+ r_name, " <- ", r_name, " %>% dplyr::mutate(", step_cols, " = cut(", step_cols, ",\n",
+ " breaks = seq(", start, ", ", end, ", by=", b, "),\n",
+ " labels = c(", paste(shQuote(lbls), collapse = ", "), "), right=FALSE, include_lowest=TRUE))"
+ )
+
+ code_s <- paste0(
+ "// Bucketing\n",
+ "gen long __b = floor(", step_cols, "/", b, ")*", b, "\n",
+ "tostring __b, gen(__bstr)\n",
+ "gen long __e = __b + ", b, " - 1\n",
+ "tostring __e, gen(__estr)\n",
+ "replace ", step_cols, " = __bstr + \"-\" + __estr if !missing(", step_cols, ")\n",
+ "drop __b __e __bstr __estr"
+ )
+
+ code_py <- paste0(
+ "# Bucketing\nimport pandas as pd\nimport numpy as np\n",
+ "b = ", b, "\n",
+ "minv = int(np.floor(", py_name, "['", step_cols, "'].min()/b)*b)\n",
+ "maxv = int(np.ceil((", py_name, "['", step_cols, "'].max()+1)/b)*b)\n",
+ "bins = list(range(minv, maxv+1, b))\n",
+ "labels = [f\"{bins[i]}-{bins[i+1]-1}\" for i in range(len(bins)-1)]\n",
+ py_name, "['", step_cols, "'] = pd.cut(", py_name, "['", step_cols, "'], bins=bins, labels=labels, right=False, include_lowest=True)"
+ )
+
+ } else if (input$method == "Pseudonymization") {
+ step_cols <- input$ps_cols
+ if (length(step_cols) == 0) { showNotification("Select columns to pseudonymize.", type="error"); return() }
+
+ salt_vec <- uuid::UUIDgenerate(n = nrow(df))
+ hash_vec <- function(values, salts) {
+ mapply(function(v, s) digest::digest(paste0(as.character(v), s), algo = "sha256"),
+ values, salts, USE.NAMES = FALSE)
+ }
+
+ for (col in step_cols) {
+ x <- df[[col]]
+ df[[col]] <- ifelse(is.na(x), NA_character_, hash_vec(x, salt_vec))
+ }
+
+ code_r <- "# Pseudonymization applied in-app"
+ code_s <- "// Pseudonymization\n// no direct Stata analogue"
+ code_py <- "# Pseudonymization applied in-app"
+
+ } else if (input$method == "Tokenization") {
+ step_cols <- input$tok_cols
+ if (length(step_cols) == 0) { showNotification("Select columns to tokenize.", type="error"); return() }
+
+ df <- dplyr::mutate(
+ df,
+ dplyr::across(
+ dplyr::all_of(step_cols),
+ ~ ifelse(is.na(.x), NA_character_, stringi::stri_rand_strings(length(.x), 10, pattern = "[A-Za-z0-9]"))
+ )
+ )
+
+ code_r <- "# Tokenization applied in-app"
+ code_s <- "// Tokenization\n// no direct Stata analogue"
+ code_py <- "# Tokenization applied in-app"
+
+ } else if (input$method == "K-Anonymity") {
+ qids_all <- initial_qids()
+ k_val <- as.integer(input$k_value)
+ var_now <- input$k_bucket_var
+ bin_size <- as.integer(input$k_bin_size)
+
+ vld(
+ need(length(qids_all) > 0, "Select QIDs in step 2 first."),
+ need(!is.na(k_val) && k_val >= 2, "k must be ≥ 2")
+ )
+
+ bins <- k_bins()
+ if (!is.null(var_now) && !is.na(bin_size)) { bins[[var_now]] <- bin_size; k_bins(bins) }
+
+ bucket_one <- function(x, b) {
+ if (!is.numeric(x)) return(x)
+ rng <- range(x, na.rm = TRUE)
+ if (!all(is.finite(rng))) return(x)
+ start <- floor(rng[1]/b)*b
+ end <- ceiling((rng[2]+1)/b)*b
+ cut(x, breaks = seq(start, end, by = b), right = FALSE, include.lowest = TRUE,
+ labels = sprintf("%d-%d", head(seq(start, end, by=b), -1), seq(start, end, by=b)[-1]-1))
+ }
+
+ if (length(bins)) for (nm in names(bins)) if (nm %in% names(df)) df[[nm]] <- bucket_one(df[[nm]], bins[[nm]])
+ k_qids_use <- intersect(names(bins), qids_all)
+ vld(need(length(k_qids_use) > 0, "Bucket at least one numeric QID before applying k-anonymity."))
+
+ grouped_sizes <- df %>%
+ dplyr::group_by(dplyr::across(dplyr::all_of(k_qids_use))) %>%
+ dplyr::tally(name=".__size__")
+
+ df_tagged <- df %>%
+ dplyr::left_join(grouped_sizes, by = k_qids_use) %>%
+ dplyr::mutate(.__ok__ = .__size__ >= k_val)
+
+ df_ok <- df_tagged %>% dplyr::filter(.__ok__) %>% dplyr::select(-.__ok__, -.__size__)
+ df_bad <- df_tagged %>% dplyr::filter(!.__ok__) %>% dplyr::select(-.__ok__, -.__size__)
+ df <- dplyr::bind_rows(df_ok, df_bad)
+
+ anon_data(df)
+ anonymized_cols(union(anonymized_cols(), k_qids_use))
+
+ code_r <- "# K-Anonymity applied in-app"
+ code_s <- "* no Stata analogue"
+ code_py <- "# no Python analogue"
+
+ } else if (input$method == "T-Closeness") {
+ step_cols <- risk_qids(initial_qids())
+ vld(need(length(step_cols) > 0, "Select non-coordinate QIDs."))
+
+ df <- apply_t_closeness(df, qids = step_cols, sensitive = input$sensitive_attr_tc, t = input$t_threshold)
+
+ code_r <- "# T-Closeness applied in-app"
+ code_s <- "// T-Closeness\n// no direct Stata analogue"
+ code_py <- "# T-Closeness applied in-app"
+
+ } else if (input$method == "Anonymize Coordinates") {
+
+ latc <- input$geo_lat_col; lonc <- input$geo_lon_col
+
+ vld(
+ need(!is.null(latc) && !is.null(lonc), "Pick latitude & longitude columns first."),
+ need(latc %in% names(df) && lonc %in% names(df), "Invalid coordinate columns."),
+ need(is.numeric(df[[latc]]), "Latitude must be numeric."),
+ need(is.numeric(df[[lonc]]), "Longitude must be numeric.")
+ )
+
+ pts_df <- df[, c(lonc, latc), drop = FALSE]
+ names(pts_df) <- c("lon","lat")
+ comp_mask_full <- stats::complete.cases(pts_df)
+ pts_df <- pts_df[comp_mask_full, , drop = FALSE]
+
+ if (!nrow(pts_df)) {
+ showNotification("No valid coordinate rows to anonymize.", type="error"); return()
+ }
+
+ pts_wgs <- sf::st_as_sf(pts_df, coords = c("lon","lat"), crs = 4326)
+ xy <- sf::st_coordinates(pts_wgs)
+ epsg <- guess_utm_epsg(xy[,1], xy[,2])
+ pts_utm <- safe_transform(pts_wgs, epsg)
+
+ mode <- input$geo_mode %||% "aggregate"
+ cell_wkt_full <- rep(NA_character_, nrow(df))
+
+ if (mode == "truncate") {
+ d <- max(0, suppressWarnings(as.integer(input$geo_decimals))); if (is.na(d)) d <- 3
+ trunc_dec <- function(x, d) { f <- 10^d; sign(x) * floor(abs(x) * f) / f }
+
+ xy_wgs <- sf::st_coordinates(safe_transform(pts_utm, 4326))
+ lon_a <- trunc_dec(xy_wgs[,1], d)
+ lat_a <- trunc_dec(xy_wgs[,2], d)
+
+ lat_mean <- mean(lat_a, na.rm = TRUE)
+ m_per_deg_lat <- 111320
+ m_per_deg_lon <- 111320 * cos(lat_mean * pi/180)
+ cell_m <- nz_pos(min(m_per_deg_lat, m_per_deg_lon) / (10^d), 100)
+
+ agg <- build_grid_agg(pts_utm, cell_m)
+ agg_wgs <- safe_transform(agg, 4326)
+ geo_after_layer(agg_wgs)
+
+ if (nrow(agg)) {
+ join_ix <- sf::st_intersects(pts_utm, agg)
+ agg_wkt <- sf::st_as_text(sf::st_geometry(agg_wgs))
+
+ cell_wkt_new <- rep(NA_character_, nrow(pts_utm))
+ for (i in seq_len(nrow(pts_utm))) {
+ cell_idx <- if (length(join_ix[[i]]) >= 1) join_ix[[i]][1] else NA_integer_
+ if (is.finite(cell_idx)) cell_wkt_new[i] <- agg_wkt[cell_idx]
+ }
+
+ df_rows_idx <- which(comp_mask_full)
+ df[[lonc]][df_rows_idx] <- lon_a
+ df[[latc]][df_rows_idx] <- lat_a
+ cell_wkt_full[df_rows_idx] <- cell_wkt_new
+ } else {
+ showNotification("No grid cells created (empty). Check decimals or data extent.", type="warning")
+ }
+
+ anonymized_cols(union(anonymized_cols(), c(latc, lonc, "cell_wkt")))
+ showNotification("Coordinate anonymization (truncate) applied.", type = "message")
+
+ } else if (mode == "aggregate") {
+ gsize <- nz_pos(input$geo_grid_m, 500)
+
+ agg <- build_grid_agg(pts_utm, gsize)
+ agg_wgs <- safe_transform(agg, 4326)
+ geo_after_layer(agg_wgs)
+
+ if (nrow(agg)) {
+ join_ix <- sf::st_intersects(pts_utm, agg)
+
+ cents <- sf::st_centroid(agg)
+ cents_wgs <- safe_transform(cents, 4326)
+ cxy <- sf::st_coordinates(cents_wgs)
+
+ agg_wkt <- sf::st_as_text(sf::st_geometry(agg_wgs))
+
+ lon_new <- rep(NA_real_, nrow(pts_utm))
+ lat_new <- rep(NA_real_, nrow(pts_utm))
+ cell_wkt_new <- rep(NA_character_, nrow(pts_utm))
+
+ for (i in seq_len(nrow(pts_utm))) {
+ cell_idx <- if (length(join_ix[[i]]) >= 1) join_ix[[i]][1] else NA_integer_
+ if (is.finite(cell_idx)) {
+ lon_new[i] <- cxy[cell_idx, 1]
+ lat_new[i] <- cxy[cell_idx, 2]
+ cell_wkt_new[i] <- agg_wkt[cell_idx]
+ }
+ }
+
+ df_rows_idx <- which(comp_mask_full)
+ df[[lonc]][df_rows_idx] <- lon_new
+ df[[latc]][df_rows_idx] <- lat_new
+ cell_wkt_full[df_rows_idx] <- cell_wkt_new
+
+ } else {
+ showNotification("No grid cells created (empty). Check grid size or data extent.", type="warning")
+ }
+
+ anonymized_cols(union(anonymized_cols(), c(latc, lonc, "cell_wkt")))
+ showNotification("Coordinate anonymization (aggregate to polygons) applied.", type = "message")
+
+ } else {
+ showNotification("Unsupported mode selected.", type = "error"); return()
+ }
+
+ df$cell_wkt <- cell_wkt_full
+ df <- df[, setdiff(names(df), c(lonc, latc)), drop = FALSE]
+
+ anon_data(df)
+ log_steps(c(log_steps(), paste(Sys.time(), "- Applied Anonymize Coordinates (mode:", mode, ")")))
+ geo_preview_layer(NULL)
+
+ code_r <- "# Anonymize Coordinates applied in-app (cell_wkt added; lat/lon dropped)"
+ code_s <- "* no direct Stata analogue"
+ code_py <- "# Anonymize Coordinates applied (cell_wkt added; lat/lon dropped)"
+ }
+
+ if (!is.null(step_cols) && input$method %in% c(
+ "Masking","Suppression","Bucketing","Pseudonymization","Tokenization","T-Closeness"
+ )) {
+ anon_data(df)
+ anonymized_cols(union(anonymized_cols(), step_cols))
+ }
+
+ if (!is.null(code_r)) r_steps( c(r_steps(), code_r))
+ if (!is.null(code_s)) stata_steps(c(stata_steps(), code_s))
+ if (!is.null(code_py)) python_steps(c(python_steps(), code_py))
+
+ if (input$method != "Anonymize Coordinates") {
+ log_steps(c(log_steps(), paste(Sys.time(), "- Applied", input$method)))
+ }
+
+ shinyAce::updateAceEditor(session, "r_code_ace", value = paste(r_steps(), collapse = "\n\n"))
+ shinyAce::updateAceEditor(session, "stata_code_ace", value = paste(stata_steps(), collapse = "\n\n"))
+ shinyAce::updateAceEditor(session, "python_code_ace", value = paste(python_steps(), collapse = "\n\n"))
+ })
+
+ # ---------- K-Report --------------------------------------------------------
+ # Textual summary of the most recent k-anonymity run: the threshold k and,
+ # when numeric QIDs were bucketed, the configured bin size per variable.
+ output$k_report <- renderUI({
+   req(last_method())
+   if (last_method() != "K-Anonymity") return(NULL)
+
+   k_val <- input$k_value
+   bins  <- k_bins()
+
+   bucket_line <- NULL
+   if (length(bins) > 0) {
+     pieces <- paste0(names(bins), " = ", bins)
+     bucket_line <- tags$p(paste0("Bucket sizes: ", paste(pieces, collapse = ", ")))
+   }
+
+   tags$div(
+     tags$h4("K-Anonymity Report"),
+     tags$p(paste0("Threshold k = ", k_val)),
+     bucket_line
+   )
+ })
+
+ # ---------- Generalization UI & Logic --------------------------------------
+ # Mode-dependent controls for generalizing input$gen_var: a drag-and-drop
+ # bucket list for collapsing categories, and a free-text range editor for
+ # numeric bucketing (the numeric mode is only offered for numeric columns).
+ output$gen_groups_ui <- renderUI({
+   req(input$gen_var)
+   # Prefer the partially anonymized data when the variable still exists
+   # there, so the category list reflects earlier anonymization steps.
+   working <- if (!is.null(anon_data()) && input$gen_var %in% names(anon_data())) anon_data() else data()
+   is_num <- is.numeric(working[[input$gen_var]])
+   default_mode <- if (is_num) "numeric" else "categorical"
+
+   tagList(
+     radioButtons(
+       ns("gen_mode"), "Generalization mode:",
+       # The NULL branch drops the numeric choice for non-numeric variables.
+       choices = c("Categorical (drag & drop)" = "categorical",
+                   "Custom numeric ranges" = if (is_num) "numeric" else NULL),
+       selected = default_mode, inline = TRUE
+     ),
+
+     conditionalPanel(
+       condition = sprintf("input['%s'] == 'categorical'", ns("gen_mode")),
+       {
+         cats <- unique(as.character(working[[input$gen_var]]))
+         tagList(
+           sortable::bucket_list(
+             header = "Drag values to collapse",
+             group_name = ns("gen_group"),
+             orientation= "horizontal",
+             sortable::add_rank_list("Available categories", ns("gen_available"), labels = cats),
+             sortable::add_rank_list("Selected categories", ns("gen_selected"), labels = NULL)
+           ),
+           textInput(ns("gen_new_label"), "New label for selected categories:"),
+           # NOTE(review): this button id is reused by the numeric panel below.
+           # conditionalPanel hides rather than removes, so two elements with
+           # the same id can coexist in the DOM -- confirm Shiny binds the one
+           # the user sees.
+           actionButton(ns("apply_generalization"), "Apply Generalization", class = "btn btn-primary btn-block")
+         )
+       }
+     ),
+
+     conditionalPanel(
+       condition = sprintf("input['%s'] == 'numeric'", ns("gen_mode")),
+       tagList(
+         helpText("Define numeric buckets: e.g., '0-10:Group 1;11-40:Group 2;41-100:Group 3'"),
+         textInput(ns("gen_ranges_text"), "Ranges (lower-upper:label; semicolon separated):",
+                   value = "0-10:Group 1;11-40:Group 2;41-100:Group 3"),
+         helpText("Use '-Inf' or 'Inf' for open bounds. Ranges must not overlap or touch."),
+         actionButton(ns("apply_generalization"), "Apply Generalization", class = "btn btn-primary btn-block")
+       )
+     )
+   )
+ })
+
+ # Apply the configured generalization (categorical collapse or numeric
+ # ranges) to input$gen_var. State (data, code panes, category assignments)
+ # is snapshotted first so the step can be undone, and reproducibility
+ # snippets are appended to the R/Stata/Python code panes.
+ observeEvent(input$apply_generalization, {
+   req(data(), input$gen_var, input$gen_mode)
+   last_method("Generalization")
+
+   # Snapshot everything Undo needs to restore.
+   previous_data_stack(c(previous_data_stack(), list(if (is.null(anon_data())) data() else anon_data())))
+   previous_r_stack(c(previous_r_stack(), list(r_steps())))
+   previous_stata_stack(c(previous_stata_stack(), list(stata_steps())))
+   previous_py_stack(c(previous_py_stack(), list(python_steps())))
+   previous_gen_assigned_stack(c(previous_gen_assigned_stack(), list(gen_assigned())))
+
+   df <- if (is.null(anon_data())) data() else anon_data()
+   var <- input$gen_var
+   code_r <- code_s <- code_py <- NULL
+
+   if (input$gen_mode == "categorical") {
+     sel <- input$gen_selected; lab <- input$gen_new_label
+     if (is.null(sel) || length(sel) == 0) { showNotification("No categories selected to generalize.", type = "error"); return() }
+     if (is.null(lab) || trimws(lab) == "") { showNotification("Provide a new label.", type = "error"); return() }
+
+     # Remember which raw categories have been collapsed for this variable.
+     ga <- gen_assigned(); ga[[var]] <- unique(c(ga[[var]] %||% character(), sel)); gen_assigned(ga)
+     df[[var]] <- as.character(df[[var]]); df[[var]][ df[[var]] %in% sel ] <- lab
+
+     anon_data(df)
+     anonymized_cols(union(anonymized_cols(), var))
+
+     code_r <- "# Generalization (categorical) applied in-app"
+     code_s <- "// Generalization\n// no direct Stata analogue"
+     code_py <- "# Generalization applied in-app"
+
+   } else if (input$gen_mode == "numeric") {
+     req(input$gen_ranges_text); range_txt <- input$gen_ranges_text
+     ranges_df <- tryCatch({ parse_ranges_text(range_txt) }, error = function(e) {
+       showNotification(paste("Range parse error:", e$message), type = "error"); NULL
+     })
+     if (is.null(ranges_df)) return()
+     if (!is.numeric(df[[var]])) { showNotification(sprintf("Variable '%s' must be numeric.", var), type = "error"); return() }
+
+     x <- df[[var]]
+     generalized <- rep(NA_character_, length(x))
+
+     for (i in seq_len(nrow(ranges_df))) {
+       lower <- ranges_df$lower[i]; upper <- ranges_df$upper[i]; label <- as.character(ranges_df$label[i])
+       in_range <- (x >= lower) & (x <= upper)
+       # Open bounds: only one comparison is meaningful.
+       if (is.infinite(lower) && lower < 0) in_range <- x <= upper
+       if (is.infinite(upper) && upper > 0) in_range <- x >= lower
+       # BUGFIX: which() drops NA comparisons. A plain logical subscript
+       # aborts with "NAs are not allowed in subscripted assignments" whenever
+       # the column contains missing values; NAs now simply stay NA_character_.
+       generalized[which(in_range)] <- label
+     }
+
+     df[[var]] <- generalized
+     anon_data(df)
+     anonymized_cols(union(anonymized_cols(), var))
+
+     code_r <- "# Generalization (numeric ranges) applied in-app"
+     code_s <- "// Generalization\n// no direct Stata analogue"
+     code_py <- "# Generalization applied in-app"
+   }
+
+   if (!is.null(code_r)) r_steps(c(r_steps(), code_r))
+   if (!is.null(code_s)) stata_steps(c(stata_steps(), code_s))
+   if (!is.null(code_py)) python_steps(c(python_steps(), code_py))
+   log_steps(c(log_steps(), paste(Sys.time(), "- Applied generalization (mode:", input$gen_mode, ") to", var)))
+
+   shinyAce::updateAceEditor(session, "r_code_ace", value = paste(r_steps(), collapse = "\n\n"))
+   shinyAce::updateAceEditor(session, "stata_code_ace", value = paste(stata_steps(), collapse = "\n\n"))
+   shinyAce::updateAceEditor(session, "python_code_ace", value = paste(python_steps(), collapse = "\n\n"))
+
+   showNotification("Generalization applied.", type = "message")
+ })
+
+ # ---------- Copy buttons ----------------------------------------------------
+ # Each copy button invokes the client-side copyAce() helper, which copies the
+ # contents of the matching Ace editor to the clipboard.
+ local({
+   editor_for <- list(
+     copy_r     = "r_code_ace",
+     copy_stata = "stata_code_ace",
+     copy_py    = "python_code_ace"
+   )
+   for (btn_id in names(editor_for)) {
+     local({
+       btn <- btn_id
+       editor <- editor_for[[btn]]
+       observeEvent(input[[btn]], {
+         shinyjs::runjs(sprintf("copyAce('%s','%s')", ns(editor), ns(btn)))
+       })
+     })
+   }
+ })
+
+ # ---------- Undo & Reset ----------------------------------------------------
+ # Pop the most recent snapshot off the undo stacks and restore it: data,
+ # the three code panes, and (when a snapshot exists) the generalization
+ # category assignments.
+ observeEvent(input$undo, {
+   prev <- previous_data_stack(); pr <- previous_r_stack()
+   ps <- previous_stata_stack(); pp <- previous_py_stack()
+   if (length(prev) > 0) {
+     idx <- length(prev)
+     anon_data(prev[[idx]])
+     r_steps(pr[[idx]]); stata_steps(ps[[idx]]); python_steps(pp[[idx]])
+     previous_data_stack(prev[-idx]); previous_r_stack(pr[-idx])
+     previous_stata_stack(ps[-idx]); previous_py_stack(pp[-idx])
+     log_steps(c(log_steps(), paste(Sys.time(), "- Undid last step")))
+
+     # Restore generalization assignments when a snapshot exists.
+     # NOTE(review): this stack appears to be pushed independently of the
+     # data stack, so the two may have different depths -- confirm the pops
+     # stay aligned when undoing mixed step types.
+     pgs <- previous_gen_assigned_stack()
+     if (length(pgs) > 0) {
+       gen_assigned(pgs[[length(pgs)]])
+       previous_gen_assigned_stack(pgs[-length(pgs)])
+     }
+
+     # Map overlays no longer match the restored data; drop them.
+     geo_preview_layer(NULL); geo_after_layer(NULL)
+
+     showNotification("Undo successful.", type = "message")
+     shinyAce::updateAceEditor(session,"r_code_ace", value = paste(r_steps(), collapse = "\n\n"))
+     shinyAce::updateAceEditor(session,"stata_code_ace", value = paste(stata_steps(), collapse = "\n\n"))
+     shinyAce::updateAceEditor(session,"python_code_ace", value = paste(python_steps(), collapse = "\n\n"))
+   }
+ })
+
+ # Full reset: restore the raw upload and wipe every derived artifact
+ # (anonymized data, code panes, undo stacks, method state, map layers,
+ # cached risk metrics).
+ observeEvent(input$reset, {
+   req(raw_data())
+
+   # Back to the raw upload.
+   data(raw_data()); anon_data(NULL)
+   anonymized_cols(character()); initial_qids(character())
+
+   # Code panes return to just the import snippets.
+   r_steps(import_snip()); stata_steps(import_stata_snip()); python_steps(import_py_snip())
+
+   # Empty the undo stacks.
+   previous_data_stack(list()); previous_r_stack(list())
+   previous_stata_stack(list()); previous_py_stack(list())
+   previous_gen_assigned_stack(list())
+
+   # Clear method-specific state.
+   k_bins(list()); gen_assigned(list())
+   geo_preview_layer(NULL); geo_after_layer(NULL)
+   geo_lat_col(NULL); geo_lon_col(NULL)
+   risk_before_metrics(NULL)
+   risk_after_metrics(NULL)
+
+   log_steps(c(log_steps(), paste(Sys.time(),"- Reset complete")))
+   showNotification("Reset complete.", type = "warning")
+
+   shinyAce::updateAceEditor(session,"r_code_ace", value = paste(r_steps(), collapse = "\n\n"))
+   shinyAce::updateAceEditor(session,"stata_code_ace", value = paste(stata_steps(), collapse = "\n\n"))
+   shinyAce::updateAceEditor(session,"python_code_ace", value = paste(python_steps(), collapse = "\n\n"))
+ })
+
+ # ---------- Preview merged table -------------------------------------------
+ # Side-by-side preview (first 10 rows): every anonymized column is shown as
+ # an original/"_anon" pair, followed by the untouched columns of the current
+ # dataset.
+ output$preview_merged <- renderTable({
+   req(data())
+   before <- data()
+   after  <- if (is.null(anon_data())) before else anon_data()
+
+   cols <- list()
+   for (nm in anonymized_cols()) {
+     # Skip columns that no longer exist on either side (e.g. suppressed).
+     if (!(nm %in% names(before)) || !(nm %in% names(after))) next
+     cols[[nm]] <- before[[nm]]
+     cols[[paste0(nm, "_anon")]] <- after[[nm]]
+   }
+   for (nm in setdiff(names(after), anonymized_cols())) {
+     cols[[nm]] <- after[[nm]]
+   }
+
+   head(as.data.frame(cols, stringsAsFactors = FALSE), 10)
+ }, rownames = FALSE)
+
+ # ---------- Map (safe basemap + module-safe leafletProxy) -------------------
+ # Base leaflet map for the coordinate-anonymization step: default tiles, a
+ # scale bar, and -- when valid numeric lat/lon columns are selected -- the
+ # original points as clustered circle markers, zoomed to their extent.
+ output$geo_map <- leaflet::renderLeaflet({
+   req(input$method == "Anonymize Coordinates", data())
+
+   m <- leaflet::leaflet(options = leaflet::leafletOptions(minZoom = 1)) |>
+     leaflet::addTiles() |>
+     leaflet::addScaleBar(position = "bottomleft")
+
+   latc <- input$geo_lat_col
+   lonc <- input$geo_lon_col
+
+   # Only draw points when both columns exist and are numeric.
+   if (!is.null(latc) && !is.null(lonc) &&
+       latc %in% names(data()) && lonc %in% names(data()) &&
+       is.numeric(data()[[latc]]) && is.numeric(data()[[lonc]])) {
+
+     df0 <- data()[, c(lonc, latc), drop = FALSE]
+     names(df0) <- c("lon","lat")
+     # Drop rows with missing coordinates before plotting.
+     df0 <- df0[stats::complete.cases(df0), , drop = FALSE]
+
+     if (nrow(df0)) {
+       m <- m |>
+         leaflet::addCircleMarkers(
+           lng = df0$lon, lat = df0$lat,
+           radius = 3, stroke = FALSE, fillOpacity = 0.6,
+           clusterOptions = leaflet::markerClusterOptions(),
+           group = "Before (points)"
+         )
+
+       # Guard against non-finite extents before fitting bounds.
+       if (all(is.finite(range(df0$lon))) && all(is.finite(range(df0$lat)))) {
+         m <- m |>
+           leaflet::fitBounds(
+             lng1 = min(df0$lon), lat1 = min(df0$lat),
+             lng2 = max(df0$lon), lat2 = max(df0$lat)
+           )
+       }
+     }
+   }
+
+   # Layer toggles; "After (areas)" is filled in later via leafletProxy.
+   m |>
+     leaflet::addLayersControl(
+       overlayGroups = c("Before (points)", "After (areas)"),
+       options = leaflet::layersControlOptions(collapsed = FALSE)
+     )
+ })
+
+ # Overlay the anonymized "After (areas)" polygons whenever geo_after_layer()
+ # changes; clears the group when there is nothing valid to draw.
+ observe({
+   req(input$method == "Anonymize Coordinates")
+   pol <- geo_after_layer()
+
+   # BUGFIX: leafletProxy() already prefixes the map id with the module
+   # namespace when given the module session, so pass the bare output id.
+   # Wrapping it in session$ns() double-namespaces the id and the proxy
+   # silently targets a non-existent map.
+   prx <- leaflet::leafletProxy("geo_map", session = session)
+
+   if (is.null(pol) || !inherits(pol, "sf") || nrow(pol) == 0) {
+     prx |> leaflet::clearGroup("After (areas)")
+     return()
+   }
+
+   # Sanitize geometries before drawing: repair invalid rings, strip Z/M
+   # dimensions, and drop empty geometries.
+   pol <- sf::st_make_valid(pol)
+   pol <- sf::st_zm(pol, drop = TRUE, what = "ZM")
+   pol <- pol[!sf::st_is_empty(pol), , drop = FALSE]
+   if (!nrow(pol)) { prx |> leaflet::clearGroup("After (areas)"); return() }
+
+   prx |>
+     leaflet::clearGroup("After (areas)") |>
+     leaflet::addPolygons(
+       data = pol,
+       weight = 1, color = "#555555", fillOpacity = 0.5, fillColor = "#2A9D8F",
+       group = "After (areas)",
+       popup = ~paste0("n = ", n)
+     )
+ })
+
+ # ---------- Risk BEFORE (coords excluded) ----------------------------------
+ # One-line summary of pre-anonymization re-identification risk, computed
+ # elsewhere and cached in risk_before_metrics().
+ output$risk_before <- renderUI({
+   req(data(), initial_qids())
+   qids <- risk_qids(initial_qids())
+   vld(need(length(qids) > 0, "Select non-coordinate QIDs."))
+
+   metrics <- risk_before_metrics()
+   req(metrics)
+
+   msg <- sprintf(
+     "Before: Average Risk: %.4f; Maximum Risk: %.4f; Percentage Unique: %.4f%%",
+     metrics$avg, metrics$max, metrics$pct_unique * 100
+   )
+   tags$p(msg)
+ })
+
+ # Gauge visualization of the average pre-anonymization risk (percent).
+ output$gauge_before <- flexdashboard::renderGauge({
+   req(data(), initial_qids())
+   qids <- risk_qids(initial_qids())
+   vld(need(length(qids) > 0, "Select non-coordinate QIDs."))
+
+   metrics <- risk_before_metrics()
+   req(metrics)
+
+   pct <- round(metrics$avg * 100, 2)
+   flexdashboard::gauge(
+     pct, min = 0, max = 100, symbol = "%",
+     sectors = flexdashboard::gaugeSectors(
+       success = c(0, 20), warning = c(20, 50), danger = c(50, 100)
+     ),
+     label = sprintf("%.2f%%", pct), abbreviate = FALSE
+   )
+ })
+
+ # ---------- Row counts / log -----------------------------------------------
+ # Row counts before (and, once anonymized, after) processing.
+ output$n_obs_text <- renderText({
+   d <- data()
+   if (is.null(d)) return("")
+   suffix <- ""
+   if (!is.null(anon_data())) suffix <- paste0(" | After: ", nrow(anon_data()))
+   paste0("Rows: ", nrow(d), suffix)
+ })
+
+ # Running audit log of applied steps.
+ output$step_log <- renderText({
+   paste(log_steps(), collapse = "\n")
+ })
+
+ # ---------- Downloads -------------------------------------------------------
+ # Factory for the export handlers: each writes the anonymized data (minus the
+ # raw geo columns) in one format, failing loudly when nothing is available.
+ make_download <- function(ext, writer) {
+   downloadHandler(
+     filename = function() paste0("anonymized_", Sys.Date(), ".", ext),
+     content = function(file) {
+       dat <- anon_data()
+       if (is.null(dat)) stop("No anonymized data available to download.")
+       writer(drop_geo_cols(dat), file)
+     }
+   )
+ }
+
+ output$download       <- make_download("csv",  function(d, f) utils::write.csv(d, f, row.names = FALSE))
+ output$download_excel <- make_download("xlsx", function(d, f) openxlsx::write.xlsx(d, f))
+ output$download_dta   <- make_download("dta",  function(d, f) haven::write_dta(d, f))
+
+ # ---------- Report preview/download ----------------------------------------
+ # Render the risk-report Rmd template to a temporary HTML file and return its
+ # path, or NULL when the template file is missing. Risk metrics are computed
+ # over the full QID set and over every non-empty QID subset, both before and
+ # after anonymization. (Refactor: the previously copy-pasted group/summarise
+ # risk computation now lives in three small helpers; outputs are unchanged.)
+ render_html_report <- function() {
+   req(data(), anon_data(), initial_qids())
+
+   tmpl <- normalizePath(
+     file.path(shiny::getShinyOption("appDir") %||% getwd(), "server", "anon", "docs", "report_template.Rmd"),
+     mustWork = FALSE
+   )
+   if (!file.exists(tmpl)) {
+     showNotification(paste0("Missing report template: ", tmpl), type = "error")
+     return(NULL)
+   }
+
+   q_main <- risk_qids(initial_qids())
+   vld(need(length(q_main) > 0, "Select non-coordinate QIDs."))
+   df_after <- anon_data()
+
+   # Equivalence-class sizes and per-record risk (r = 1/n) for `cols` in `d`.
+   risk_tbl <- function(d, cols) {
+     d %>%
+       dplyr::group_by(dplyr::across(dplyr::all_of(cols))) %>%
+       dplyr::summarise(n = dplyr::n(), .groups = "drop") %>%
+       dplyr::mutate(r = round(1 / n, 4))
+   }
+
+   # Headline metrics for the template: average/max risk, % unique records.
+   risk_metrics <- function(tbl) {
+     list(
+       avg = round(mean(tbl$r), 4),
+       max = round(max(tbl$r), 4),
+       pct_unique = paste0(round(mean(tbl$n == 1) * 100, 4), "%")
+     )
+   }
+
+   # One row of the per-subset permutation table; `label_cols` keeps the
+   # original QID names even when the after-data uses renamed columns.
+   subset_row <- function(d, cols, label_cols) {
+     tbl <- risk_tbl(d, cols)
+     data.frame(
+       QIDs = paste(label_cols, collapse = ", "),
+       Average_Risk = round(mean(tbl$r), 4),
+       Maximum_Risk = round(max(tbl$r), 4),
+       Percent_Unique = paste0(round(mean(tbl$n == 1) * 100, 4), "%"),
+       stringsAsFactors = FALSE
+     )
+   }
+
+   bm <- risk_metrics(risk_tbl(data(), q_main))
+
+   after_cols_main <- sapply(q_main, get_after_col, df_after = df_after, USE.NAMES = FALSE)
+   am <- risk_metrics(risk_tbl(df_after, after_cols_main))
+
+   # All non-empty subsets of the main QIDs.
+   subsets <- unlist(
+     lapply(seq_along(q_main), function(k) combn(q_main, k, simplify = FALSE)),
+     recursive = FALSE
+   )
+
+   perm_tbl_before <- do.call(rbind, lapply(subsets, function(sub) {
+     subset_row(data(), sub, sub)
+   }))
+   perm_tbl_after <- do.call(rbind, lapply(subsets, function(sub) {
+     after_sub <- sapply(sub, get_after_col, df_after = df_after, USE.NAMES = FALSE)
+     subset_row(df_after, after_sub, sub)
+   }))
+
+   tmp_html <- tempfile(fileext = ".html")
+   rmarkdown::render(
+     input = tmpl,
+     output_format = "html_document",
+     output_file = basename(tmp_html),
+     output_dir = dirname(tmp_html),
+     params = list(
+       before_metrics = bm,
+       after_metrics = am,
+       perm_table_before = perm_tbl_before,
+       perm_table_after = perm_tbl_after
+     ),
+     # Fresh environment so the template cannot leak into (or read) module
+     # state.
+     envir = new.env(parent = globalenv()),
+     quiet = TRUE
+   )
+
+   tmp_html
+ }
+
+ # Build the HTML report, expose its temp directory as a static resource
+ # path, and preview it in a modal iframe.
+ observeEvent(input$view_report, {
+   req(data(), anon_data(), initial_qids())
+
+   res_name <- paste0("reports_tmp_", gsub("[^A-Za-z0-9]", "_", landing_dom))
+
+   withProgress(message = "Building report...", value = 0.1, {
+     path <- render_html_report()
+     if (is.null(path)) return(NULL)
+     addResourcePath(res_name, dirname(path))
+     report_temp(path)
+   })
+
+   if (is.null(report_temp())) return(NULL)
+
+   showModal(modalDialog(
+     title = "Risk Report Preview",
+     size = "l", easyClose = TRUE,
+     tags$iframe(
+       src = paste0(res_name, "/", basename(report_temp())),
+       style = "width:100%; height:600px; border:none;"
+     ),
+     footer = modalButton("Close")
+   ))
+ })
+
+ # Download the last rendered report as PDF via pagedown/Chrome when
+ # available. Without pagedown the raw HTML is shipped instead (still under
+ # the .pdf filename, which downloadHandler fixes before content runs) --
+ # previously this fallback was silent; the user is now warned.
+ output$download_report <- downloadHandler(
+   filename = function() paste0("risk_report_", Sys.Date(), ".pdf"),
+   content = function(file) {
+     req(report_temp())
+     if (requireNamespace("pagedown", quietly = TRUE)) {
+       pagedown::chrome_print(input = report_temp(), output = file)
+     } else {
+       showNotification(
+         "Package 'pagedown' is not installed; delivering the HTML report instead of a PDF.",
+         type = "warning"
+       )
+       file.copy(report_temp(), file, overwrite = TRUE)
+     }
+   }
+ )
+}
diff --git a/server/deep_learning.R b/server/deep_learning.R
index ddb7824..e288e7f 100644
--- a/server/deep_learning.R
+++ b/server/deep_learning.R
@@ -4,1078 +4,1078 @@
options(shiny.maxRequestSize = 2000*1024^2)
deep_learning = function() {
+
# Backend endpoint for the deep-learning training service.
# NOTE(review): hard-coded host/port -- consider moving to configuration.
api_url <- "http://23.135.236.5:3186"

# ==============================================================================
# == 1. CORE REACTIVE VALUES
# ==============================================================================

# Live job polling: latest status payload, latest metrics, and the id of the
# job currently being watched.
polled_data <- reactiveVal(list(
  status = "Idle", task = "N/A", log = "", progress = list(percentage = 0, text = "Idle")
))
polled_metrics <- reactiveVal(NULL)
active_job_id <- reactiveVal(NULL)

# Data management: upload status text, id of the dataset currently being
# processed server-side, and a counter that forces dataset lists to reload.
data_upload_status <- reactiveVal("")
processing_dataset_id <- reactiveVal(NULL)
refresh_data_trigger <- reactiveVal(0)

# Model registry fetched from the backend at startup.
model_registry <- reactiveVal(NULL)

# History tab state.
history_metrics <- reactiveVal(NULL)
history_jobs_df <- reactiveVal(NULL)
history_poller_active <- reactiveVal(FALSE)

# Per-task inference results.
obj_inference_result <- reactiveVal(list(status = "Ready", image_url = NULL, error = NULL))
asr_inference_result <- reactiveVal(list(status = "Ready", transcription = NULL, error = NULL))
img_class_inference_result <- reactiveVal(list(status = "Ready", prediction = NULL, error = NULL))
seg_inference_result <- reactiveVal(list(status = "Ready", image_url = NULL, error = NULL))

# Proxies used to update the DT tables without full re-renders.
proxy_eval_table <- dataTableProxy("eval_table")
proxy_dataset_table <- dataTableProxy("dataset_table")
proxy_history_eval_table <- dataTableProxy("history_eval_table")
+
+
+ # ==============================================================================
+ # == 2. INITIALIZATION & SIDEBAR LOGIC
+ # ==============================================================================
+
# --- Fetch Model Registry on Startup ---
# FIXME: should only run when the deep-learning module is activated.
observe({
  tryCatch(
    {
      registry_resp <- req_perform(request(paste0(api_url, "/models/list")))
      model_registry(resp_body_json(registry_resp))
    },
    error = function(e) {
      # Swallow silently for now.
      # TODO: surface a fatal error modal to the user.
    }
  )
})
+
# --- Task Panel Switching ---
# Show exactly one training panel matching the selected task and hide the
# rest. Replaces four copy-pasted show/hide chains with a lookup table; an
# unknown task value leaves the panels untouched (same as the original
# if/else chain), and a NULL selector no longer errors.
observe({
  task <- input$task_selector
  panels <- c(
    object_detection     = "obj_panel",
    asr                  = "asr_panel",
    image_classification = "img_class_panel",
    image_segmentation   = "seg_panel"
  )
  if (!is.null(task) && task %in% names(panels)) {
    for (p in unname(panels)) {
      if (p == panels[[task]]) shinyjs::show(p) else shinyjs::hide(p)
    }
  }
})
+
# --- Chained Dropdown Logic (Populate Architectures) ---
# When the registry arrives or the task changes, fill the architecture
# dropdown of the active task from the matching registry section. The
# registry sections are keyed by the same slugs as the task selector, so a
# single lookup table replaces the four-branch if/else chain (the original
# also initialized an unused "Loading..." placeholder, dropped here).
observeEvent(c(model_registry(), input$task_selector), {
  req(model_registry())

  task_slug <- input$task_selector
  # task slug -> architecture select input to update
  arch_inputs <- c(
    object_detection     = "obj_model_arch",
    asr                  = "asr_model_arch",
    image_classification = "img_class_model_arch",
    image_segmentation   = "seg_model_arch"
  )
  if (task_slug %in% names(arch_inputs)) {
    arch_choices <- names(model_registry()[[task_slug]])
    updateSelectInput(session, arch_inputs[[task_slug]], choices = arch_choices)
  }
})
+
# --- Chained Dropdown Logic (Populate Checkpoints) ---
# One observer per task: when an architecture is picked, load its checkpoint
# list from the registry into the matching checkpoint dropdown. The four
# near-identical observers are generated from a config list instead of being
# copy-pasted.
local({
  ckpt_cfg <- list(
    list(arch = "obj_model_arch",       ckpt = "obj_model_checkpoint",       section = "object_detection"),
    list(arch = "asr_model_arch",       ckpt = "asr_model_checkpoint",       section = "asr"),
    list(arch = "img_class_model_arch", ckpt = "img_class_model_checkpoint", section = "image_classification"),
    list(arch = "seg_model_arch",       ckpt = "seg_model_checkpoint",       section = "image_segmentation")
  )
  lapply(ckpt_cfg, function(cfg) {
    observeEvent(input[[cfg$arch]], {
      req(model_registry(), input[[cfg$arch]], input[[cfg$arch]] != "Loading...")
      checkpoints <- model_registry()[[cfg$section]][[input[[cfg$arch]]]]
      updateSelectInput(session, cfg$ckpt, choices = checkpoints)
    })
  })
})
+
+
+ # ==============================================================================
+ # == 3. "DATA MANAGEMENT" TAB LOGIC
+ # ==============================================================================
+
# --- Helper: Load Datasets for a specific task ---
# Returns a named vector (name -> id) suitable for selectInput choices, or a
# single placeholder entry when the list is empty or the API call fails.
load_datasets_for_task <- function(task_slug) {
  tryCatch({
    list_resp <- req_perform(request(paste0(api_url, "/data/list/", task_slug)))
    resp_data <- resp_body_json(list_resp, simplifyVector = TRUE)
    # Guard with is.data.frame(): when the JSON does not simplify to a data
    # frame, nrow() returns NULL and the original `length(...) > 0 && nrow(...)`
    # errored (falling through to the misleading "Error loading" branch).
    if (is.data.frame(resp_data) && nrow(resp_data) > 0) {
      setNames(resp_data$id, resp_data$name)
    } else {
      c("No datasets found" = "")
    }
  }, error = function(e) {
    c("Error loading datasets" = "")
  })
}
+
# --- Auto-refresh Dataset Dropdowns ---
# Triggered by: 1. task selector change, 2. data refresh trigger.
# A lookup table replaces the four-branch if/else chain; each branch passed
# the same slug the selector held, so the mapping is slug -> input id only.
observeEvent(c(input$task_selector, refresh_data_trigger()), {
  task_slug <- input$task_selector
  dataset_inputs <- c(
    object_detection     = "obj_dataset_id",
    asr                  = "asr_dataset_id",
    image_classification = "img_class_dataset_id",
    image_segmentation   = "seg_dataset_id"
  )
  if (task_slug %in% names(dataset_inputs)) {
    updateSelectInput(session, dataset_inputs[[task_slug]],
                      choices = load_datasets_for_task(task_slug))
  }
}, ignoreNULL = TRUE, ignoreInit = TRUE)
+
# Kick off the very first dataset load once the model registry has arrived;
# `once = TRUE` guarantees this fires a single time per session.
observeEvent(model_registry(), {
  req(model_registry())
  bumped <- refresh_data_trigger() + 1
  refresh_data_trigger(bumped)
}, once = TRUE)
+
+
# --- Handle Dataset Upload Button ---
# POSTs the chosen zip to the backend as multipart form data and hands the
# returned dataset id to the processing poller.
# Fix: the local request variable was named `req`, shadowing shiny::req()
# used on the first line (it worked only because R skips non-function
# bindings when resolving a call); renamed for clarity.
observeEvent(input$start_data_upload, {
  req(input$new_data_zip, input$new_data_name, input$new_data_task_type)
  data_upload_status("Uploading...")
  tryCatch({
    upload_req <- request(paste0(api_url, "/data/upload/", input$new_data_task_type)) %>%
      req_body_multipart(
        data_name = input$new_data_name,
        data_zip = curl::form_file(input$new_data_zip$datapath, type = "application/zip")
      )
    resp <- req_perform(upload_req)
    resp_data <- resp_body_json(resp)
    # Start the poller.
    processing_dataset_id(resp_data$dataset_id)
    data_upload_status(paste("Success! Dataset", input$new_data_name, "is processing..."))

  }, error = function(e) {
    error_message <- as.character(e$message)
    if (!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
    data_upload_status(paste("Error:", error_message))
  })
})
+
# --- Poller for Data Processing Status ---
# While a dataset id is registered, poll its processing status every 2s and
# mirror progress into data_upload_status. Polling stops on "ready",
# "failed", or any transport error. (Also removes a redundant single-
# argument paste() around the success message.)
observe({
  ds_id <- processing_dataset_id()
  req(ds_id)                      # only run while a dataset is processing

  invalidateLater(2000, session)  # poll every 2 seconds

  tryCatch({
    status_resp <- req_perform(request(paste0(api_url, "/data/status/", ds_id)))
    status_data <- resp_body_json(status_resp)

    if (status_data$status == "ready" || status_data$status == "failed") {
      processing_dataset_id(NULL)  # stop polling

      # --- AUTO-REFRESH ---
      refresh_data_trigger(refresh_data_trigger() + 1)

      if (status_data$status == "ready") {
        data_upload_status("Dataset processing complete!")
      } else {
        data_upload_status(paste("Dataset processing failed:", status_data$error))
      }

    } else {
      data_upload_status(paste("Processing dataset...", status_data$status))
    }
  }, error = function(e) {
    data_upload_status("Error polling data status.")
    processing_dataset_id(NULL)    # stop polling on error
  })
})
+
# --- Data Management UI Outputs ---
output$data_upload_status <- renderText({ data_upload_status() })

# Combined table of datasets across all four tasks; re-renders whenever the
# refresh trigger bumps. Fix: guard with is.data.frame() -- nrow() on an
# unsimplified list is NULL and errored inside `&&`.
output$dataset_table <- renderDT({
  refresh_data_trigger()  # react to the trigger

  tryCatch({
    tasks <- c("object_detection", "asr", "image_classification", "image_segmentation")
    all_datasets <- lapply(tasks, function(task) {
      list_resp <- req_perform(request(paste0(api_url, "/data/list/", task)))
      resp_data <- resp_body_json(list_resp, simplifyVector = TRUE)
      if (is.data.frame(resp_data) && nrow(resp_data) > 0) {
        resp_data$task_type <- task
        return(resp_data)
      }
      NULL
    })
    bind_rows(all_datasets)
  }, error = function(e) {
    data.frame(name = "Error loading dataset list.", task_type = e$message)
  })
})
+
+
+ # ==============================================================================
+ # == 4. TRAINING JOB SUBMISSION (One per task)
+ # ==============================================================================
+
# --- Helper: Resets UI before starting a job ---
# Clears the metrics table and any previous job state, seeds the status
# panel with a "Submitting" payload, and switches to the Live Training tab.
reset_live_training_ui <- function(task_name) {
  replaceData(proxy_eval_table, NULL, resetPaging = TRUE, clearSelection = TRUE)
  active_job_id(NULL)
  polled_metrics(NULL)
  submitting_state <- list(status = "Submitting...", task = task_name, log = "Submitting job...")
  polled_data(submitting_state)
  updateTabsetPanel(session, "main_tabs", selected = "Live Training")
}
+
# --- 4.1: Object Detection Job ---
# Collects every sidebar input into a multipart form (all values sent as
# strings, matching the API contract) and submits it. On success the
# returned job id activates the live-training poller.
# Fix: local `req` renamed to train_req to stop shadowing shiny::req().
observeEvent(input$start_obj_job, {
  req(input$obj_dataset_id, input$obj_model_checkpoint)
  reset_live_training_ui("Object Detection")

  tryCatch({
    train_req <- request(paste0(api_url, "/train/object-detection")) %>%
      req_body_multipart(
        # Common Params
        dataset_id = as.character(input$obj_dataset_id),
        model_checkpoint = as.character(input$obj_model_checkpoint),
        run_name = as.character(input$obj_run_name),
        version = as.character(input$obj_version),
        epochs = as.character(input$obj_epochs),
        train_batch_size = as.character(input$obj_train_batch_size),
        eval_batch_size = as.character(input$obj_eval_batch_size),
        seed = as.character(input$obj_seed),
        num_proc = as.character(input$obj_num_proc),
        early_stopping_patience = as.character(input$obj_early_stopping_patience),
        push_to_hub = as.character(input$obj_push_to_hub),
        hub_user_id = as.character(input$obj_hub_user_id),
        log_to_wandb = as.character(input$obj_log_to_wandb),
        wandb_project = as.character(input$obj_wandb_project),
        wandb_entity = as.character(input$obj_wandb_entity),
        max_image_size = as.character(input$obj_max_image_size),

        # HF-Specific Params
        learning_rate = as.character(input$obj_learning_rate),
        weight_decay = as.character(input$obj_weight_decay),
        gradient_accumulation_steps = as.character(input$obj_gradient_accumulation_steps),
        gradient_checkpointing = as.character(input$obj_gradient_checkpointing),
        max_grad_norm = as.character(input$obj_max_grad_norm),
        fp16 = as.character(input$obj_fp16),
        force_preprocess = as.character(input$obj_force_preprocess),
        early_stopping_threshold = as.character(input$obj_early_stopping_threshold),

        # YOLO-Specific Params
        warmup_epochs = as.character(input$obj_yolo_warmup_epochs),
        lr0 = as.character(input$obj_yolo_lr0),
        momentum = as.character(input$obj_yolo_momentum),
        optimizer = as.character(input$obj_yolo_optimizer),
        weight_decay_yolo = as.character(input$obj_yolo_weight_decay)
      )

    resp <- req_perform(train_req)
    resp_data <- resp_body_json(resp)
    active_job_id(resp_data$job_id)
    polled_data(list(status = "Queued", task = "Object Detection", log = "Job is queued."))

  }, error = function(e) {
    error_message <- as.character(e$message)
    if (!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
    polled_data(list(status = "Error", task = "Object Detection", log = error_message))
  })
})
+
# --- 4.2: ASR Job ---
# Builds the (large) ASR training form, dropping NULL-valued optional
# parameters before submission.
# Fixes: (1) max_train_hours checked is.na() before is.null() -- is.na(NULL)
# returns logical(0), which is invalid in `||`; the NULL check now
# short-circuits first. (2) local `req` renamed to avoid shadowing
# shiny::req().
observeEvent(input$start_asr_job, {
  req(input$asr_dataset_id, input$asr_model_checkpoint)
  reset_live_training_ui("ASR")

  # Default the outlier threshold when filtering is disabled or the input
  # is not a usable number.
  outlier_val <- input$outlier_std_devs
  if (is.null(outlier_val) || is.na(outlier_val) || !is.numeric(outlier_val) || !input$asr_apply_outlier_filtering) {
    outlier_val <- 2.0
  }

  # Optional wall-clock budget: NULL entries are dropped from the form below.
  max_hours <- if (is.null(input$asr_max_train_hours) || is.na(input$asr_max_train_hours)) NULL else as.character(input$asr_max_train_hours)

  tryCatch({
    req_list <- list(
      dataset_id = as.character(input$asr_dataset_id),
      model_checkpoint = as.character(input$asr_model_checkpoint),
      run_name = as.character(input$asr_run_name),
      version = as.character(input$asr_version),
      language = as.character(input$asr_language),
      language_code = as.character(input$asr_language_code),
      speaker_id_column = as.character(input$asr_speaker_id_column),
      text_column = as.character(input$asr_text_column),
      target_sampling_rate = as.character(input$asr_target_sampling_rate),
      min_duration_s = as.character(input$asr_min_duration_s),
      max_duration_s = as.character(input$asr_max_duration_s),
      min_transcript_len = as.character(input$asr_min_transcript_len),
      max_transcript_len = as.character(input$asr_max_transcript_len),
      apply_outlier_filtering = as.character(input$asr_apply_outlier_filtering),
      outlier_std_devs = as.character(outlier_val),
      is_presplit = as.character(input$asr_is_presplit),
      speaker_disjointness = as.character(input$asr_speaker_disjointness),
      train_ratio = as.character(input$asr_train_ratio),
      dev_ratio = as.character(input$asr_dev_ratio),
      test_ratio = as.character(input$asr_test_ratio),
      epochs = as.character(input$asr_epochs),
      learning_rate = as.character(input$asr_learning_rate),
      lr_scheduler_type = as.character(input$asr_lr_scheduler_type),
      warmup_ratio = as.character(input$asr_warmup_ratio),
      train_batch_size = as.character(input$asr_train_batch_size),
      eval_batch_size = as.character(input$asr_eval_batch_size),
      gradient_accumulation_steps = as.character(input$asr_gradient_accumulation_steps),
      gradient_checkpointing = as.character(input$asr_gradient_checkpointing),
      optimizer = as.character(input$asr_optimizer),
      early_stopping_patience = as.character(input$asr_early_stopping_patience),
      early_stopping_threshold = as.character(input$asr_early_stopping_threshold),
      push_to_hub = as.character(input$asr_push_to_hub),
      hub_user_id = as.character(input$asr_hub_user_id),
      hub_private_repo = as.character(input$asr_hub_private_repo),
      log_to_wandb = as.character(input$asr_log_to_wandb),
      wandb_project = as.character(input$asr_wandb_project),
      wandb_entity = as.character(input$asr_wandb_entity),
      seed = as.character(input$asr_seed),
      num_proc = as.character(input$asr_num_proc),
      max_train_hours = max_hours
    )

    # Drop optional params the user left empty (currently max_train_hours).
    req_list <- req_list[!sapply(req_list, is.null)]

    train_req <- request(paste0(api_url, "/train/asr")) %>%
      req_body_multipart(!!!req_list)

    resp <- req_perform(train_req)
    resp_data <- resp_body_json(resp)
    active_job_id(resp_data$job_id)
    polled_data(list(status = "Queued", task = "ASR", log = "Job is queued."))

  }, error = function(e) {
    error_message <- as.character(e$message)
    if (!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
    polled_data(list(status = "Error", task = "ASR", log = error_message))
  })
})
+
# --- 4.3: Image Classification Job ---
# Submits the image-classification training form.
# Fix: the error handler logged `as.character(e)` instead of following the
# e$message / e$body pattern used by the other three job handlers; made
# consistent. Local `req` renamed to avoid shadowing shiny::req().
observeEvent(input$start_img_class_job, {
  req(input$img_class_dataset_id, input$img_class_model_checkpoint)
  reset_live_training_ui("Image Classification")

  tryCatch({
    train_req <- request(paste0(api_url, "/train/image-classification")) %>%
      req_body_multipart(
        dataset_id = as.character(input$img_class_dataset_id),
        model_checkpoint = as.character(input$img_class_model_checkpoint),
        run_name = as.character(input$img_class_run_name),
        version = as.character(input$img_class_version),
        epochs = as.character(input$img_class_epochs),
        learning_rate = as.character(input$img_class_learning_rate),
        weight_decay = as.character(input$img_class_weight_decay),
        train_batch_size = as.character(input$img_class_train_batch_size),
        eval_batch_size = as.character(input$img_class_eval_batch_size),
        max_image_size = as.character(input$img_class_max_image_size),
        gradient_accumulation_steps = as.character(input$img_class_grad_accum),
        gradient_checkpointing = as.character(input$img_class_grad_check),
        fp16 = as.character(input$img_class_fp16),
        seed = as.character(input$img_class_seed),
        early_stopping_patience = as.character(input$img_class_early_stop),
        push_to_hub = as.character(input$img_class_push_to_hub),
        hub_user_id = as.character(input$img_class_hub_user_id),
        log_to_wandb = as.character(input$img_class_log_to_wandb),
        wandb_project = as.character(input$img_class_wandb_project),
        wandb_entity = as.character(input$img_class_wandb_entity),
        num_proc = as.character(input$img_class_num_proc),
        is_presplit = as.character(input$img_class_is_presplit),
        train_ratio = as.character(input$img_class_train_ratio),
        dev_ratio = as.character(input$img_class_dev_ratio)
      )

    resp <- req_perform(train_req)
    active_job_id(resp_body_json(resp)$job_id)
  }, error = function(e) {
    error_message <- as.character(e$message)
    if (!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
    polled_data(list(status = "Error", task = "Image Classification", log = error_message))
  })
})
+
# --- 4.4: Image Segmentation Job ---
# Submits the image-segmentation training form (same shape as the
# classification job). Local `req` renamed to avoid shadowing shiny::req().
observeEvent(input$start_seg_job, {
  req(input$seg_dataset_id, input$seg_model_checkpoint)
  reset_live_training_ui("Image Segmentation")

  tryCatch({
    train_req <- request(paste0(api_url, "/train/image-segmentation")) %>%
      req_body_multipart(
        dataset_id = as.character(input$seg_dataset_id),
        model_checkpoint = as.character(input$seg_model_checkpoint),
        run_name = as.character(input$seg_run_name),
        version = as.character(input$seg_version),
        epochs = as.character(input$seg_epochs),
        learning_rate = as.character(input$seg_learning_rate),
        weight_decay = as.character(input$seg_weight_decay),
        train_batch_size = as.character(input$seg_train_batch_size),
        eval_batch_size = as.character(input$seg_eval_batch_size),
        max_image_size = as.character(input$seg_max_image_size),
        gradient_accumulation_steps = as.character(input$seg_grad_accum),
        gradient_checkpointing = as.character(input$seg_grad_check),
        fp16 = as.character(input$seg_fp16),
        seed = as.character(input$seg_seed),
        early_stopping_patience = as.character(input$seg_early_stop),
        push_to_hub = as.character(input$seg_push_to_hub),
        hub_user_id = as.character(input$seg_hub_user_id),
        log_to_wandb = as.character(input$seg_log_to_wandb),
        wandb_project = as.character(input$seg_wandb_project),
        wandb_entity = as.character(input$seg_wandb_entity),
        num_proc = as.character(input$seg_num_proc),
        is_presplit = as.character(input$seg_is_presplit),
        train_ratio = as.character(input$seg_train_ratio),
        dev_ratio = as.character(input$seg_dev_ratio)
      )

    resp <- req_perform(train_req)
    active_job_id(resp_body_json(resp)$job_id)
  }, error = function(e) {
    error_message <- as.character(e$message)
    if (!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
    polled_data(list(status = "Error", task = "Image Segmentation", log = error_message))
  })
})
+
+
+ # ==============================================================================
+ # == 5. "LIVE TRAINING" TAB LOGIC
+ # ==============================================================================
+
# --- 5.1: Job Polling & Status Display ---
# Polls /status and /metrics for the active job every 2s until the job
# reaches a terminal state.
# Fix: the status response was stored in a variable named `resp_status`,
# shadowing httr2::resp_status() -- `resp_status(resp_status)` only worked
# because R skips non-function bindings when resolving a call. Renamed.
observe({
  job_id <- active_job_id()
  current_status <- polled_data()$status

  # Only poll if we have an active job that isn't finished.
  if (!is.null(job_id) && !(current_status %in% c("completed", "failed", "Error", "Polling Error"))) {
    invalidateLater(2000, session)

    # Poll for status & log.
    tryCatch({
      status_resp <- req_perform(request(paste0(api_url, "/status/", job_id)))
      if (resp_status(status_resp) == 200) {
        polled_data(resp_body_json(status_resp))
      }
    }, error = function(e) {
      current_data <- polled_data()
      current_data$status <- "Polling Error"
      polled_data(current_data)
    })

    # Poll for metrics.
    tryCatch({
      metrics_resp <- req_perform(request(paste0(api_url, "/metrics/", job_id)))
      if (resp_status(metrics_resp) == 200) {
        polled_metrics(resp_body_json(metrics_resp))
      }
    }, error = function(e) {
      polled_metrics(NULL)
    })
  }
})
+
# --- 5.2: Job Status Panel Outputs ---
output$job_task_display <- renderText({ polled_data()$task })
output$job_id_display <- renderText({
  job <- active_job_id()
  if (is.null(job)) "None" else job
})
output$job_status_display <- renderText({ polled_data()$status })
+
+ # --- 5.3: Metrics Table Panel Output ---
+ output$eval_table <- renderDT({
+ metrics_list <- polled_metrics()
+ req(metrics_list, length(metrics_list) > 0)
+
+ metrics_df <- bind_rows(metrics_list)
- # --- Helper: Resets UI before starting a job ---
- reset_live_training_ui <- function(task_name) {
- replaceData(proxy_eval_table, NULL, resetPaging = TRUE, clearSelection = TRUE)
- active_job_id(NULL)
- polled_metrics(NULL)
- polled_data(list(status = "Submitting...", task = task_name, log = "Submitting job..."))
- updateTabsetPanel(session, "main_tabs", selected = "Live Training")
+ if (nrow(metrics_df) > 0) {
+ display_df <- metrics_df %>%
+ pivot_longer(
+ cols = starts_with("eval_") | starts_with("test_"),
+ names_to = "metric_name",
+ values_to = "value",
+ values_drop_na = TRUE
+ ) %>%
+ separate(metric_name, into = c("step", "metric"), sep = "_", extra = "merge") %>%
+ pivot_wider(
+ names_from = metric,
+ values_from = value,
+ values_fn = first
+ ) %>%
+ select(any_of(c("step", "epoch", "loss", "map", "wer", "cer", "mean_iou")), everything())
+
+ datatable(
+ display_df,
+ options = list(
+ pageLength = 5,
+ scrollX = TRUE,
+ searching = FALSE,
+ autoWidth = TRUE,
+ class = 'cell-border stripe'
+ ),
+ rownames = FALSE
+ )
+ } else {
+ return(NULL)
}
-
- # --- 4.1: Object Detection Job ---
- observeEvent(input$start_obj_job, {
- req(input$obj_dataset_id, input$obj_model_checkpoint)
- reset_live_training_ui("Object Detection")
-
- tryCatch({
- req <- request(paste0(api_url, "/train/object-detection")) %>%
- req_body_multipart(
- # Common Params
- dataset_id = as.character(input$obj_dataset_id),
- model_checkpoint = as.character(input$obj_model_checkpoint),
- run_name = as.character(input$obj_run_name),
- version = as.character(input$obj_version),
- epochs = as.character(input$obj_epochs),
- train_batch_size = as.character(input$obj_train_batch_size),
- eval_batch_size = as.character(input$obj_eval_batch_size),
- seed = as.character(input$obj_seed),
- num_proc = as.character(input$obj_num_proc),
- early_stopping_patience = as.character(input$obj_early_stopping_patience),
- push_to_hub = as.character(input$obj_push_to_hub),
- hub_user_id = as.character(input$obj_hub_user_id),
- log_to_wandb = as.character(input$obj_log_to_wandb),
- wandb_project = as.character(input$obj_wandb_project),
- wandb_entity = as.character(input$obj_wandb_entity),
- max_image_size = as.character(input$obj_max_image_size),
-
- # HF-Specific Params
- learning_rate = as.character(input$obj_learning_rate),
- weight_decay = as.character(input$obj_weight_decay),
- gradient_accumulation_steps = as.character(input$obj_gradient_accumulation_steps),
- gradient_checkpointing = as.character(input$obj_gradient_checkpointing),
- max_grad_norm = as.character(input$obj_max_grad_norm),
- fp16 = as.character(input$obj_fp16),
- force_preprocess = as.character(input$obj_force_preprocess),
- early_stopping_threshold = as.character(input$obj_early_stopping_threshold),
-
- # YOLO-Specific Params
- warmup_epochs = as.character(input$obj_yolo_warmup_epochs),
- lr0 = as.character(input$obj_yolo_lr0),
- momentum = as.character(input$obj_yolo_momentum),
- optimizer = as.character(input$obj_yolo_optimizer),
- weight_decay_yolo = as.character(input$obj_yolo_weight_decay)
- )
-
- resp <- req_perform(req)
- resp_data <- resp_body_json(resp)
- active_job_id(resp_data$job_id)
- polled_data(list(status = "Queued", task = "Object Detection", log = "Job is queued."))
-
- }, error = function(e) {
- error_message <- as.character(e$message)
- if(!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
- polled_data(list(status = "Error", task = "Object Detection", log = error_message))
- })
- })
+ })
+
+ # --- 5.4: Full Log Panel Output ---
+ output$log_output <- renderText({
+ log_text <- polled_data()$log
+ if (is.null(log_text) || nchar(log_text) == 0) {
+ return("No log output...")
+ }
+ return(log_text)
+ })
+
+ # --- 5.5: Plotting Logic for "Live Training" Tab ---
+ metrics_for_plotting <- reactive({
+ metrics_list <- polled_metrics()
+ req(metrics_list, length(metrics_list) > 0)
- # --- 4.2: ASR Job ---
- observeEvent(input$start_asr_job, {
- req(input$asr_dataset_id, input$asr_model_checkpoint)
- reset_live_training_ui("ASR")
-
- outlier_val <- input$outlier_std_devs
- if (is.null(outlier_val) || is.na(outlier_val) || !is.numeric(outlier_val) || !input$asr_apply_outlier_filtering) {
- outlier_val <- 2.0
- }
-
- max_hours <- if (is.na(input$asr_max_train_hours) || is.null(input$asr_max_train_hours)) NULL else as.character(input$asr_max_train_hours)
-
- tryCatch({
- req_list <- list(
- dataset_id = as.character(input$asr_dataset_id),
- model_checkpoint = as.character(input$asr_model_checkpoint),
- run_name = as.character(input$asr_run_name),
- version = as.character(input$asr_version),
- language = as.character(input$asr_language),
- language_code = as.character(input$asr_language_code),
- speaker_id_column = as.character(input$asr_speaker_id_column),
- text_column = as.character(input$asr_text_column),
- target_sampling_rate = as.character(input$asr_target_sampling_rate),
- min_duration_s = as.character(input$asr_min_duration_s),
- max_duration_s = as.character(input$asr_max_duration_s),
- min_transcript_len = as.character(input$asr_min_transcript_len),
- max_transcript_len = as.character(input$asr_max_transcript_len),
- apply_outlier_filtering = as.character(input$asr_apply_outlier_filtering),
- outlier_std_devs = as.character(outlier_val),
- is_presplit = as.character(input$asr_is_presplit),
- speaker_disjointness = as.character(input$asr_speaker_disjointness),
- train_ratio = as.character(input$asr_train_ratio),
- dev_ratio = as.character(input$asr_dev_ratio),
- test_ratio = as.character(input$asr_test_ratio),
- epochs = as.character(input$asr_epochs),
- learning_rate = as.character(input$asr_learning_rate),
- lr_scheduler_type = as.character(input$asr_lr_scheduler_type),
- warmup_ratio = as.character(input$asr_warmup_ratio),
- train_batch_size = as.character(input$asr_train_batch_size),
- eval_batch_size = as.character(input$asr_eval_batch_size),
- gradient_accumulation_steps = as.character(input$asr_gradient_accumulation_steps),
- gradient_checkpointing = as.character(input$asr_gradient_checkpointing),
- optimizer = as.character(input$asr_optimizer),
- early_stopping_patience = as.character(input$asr_early_stopping_patience),
- early_stopping_threshold = as.character(input$asr_early_stopping_threshold),
- push_to_hub = as.character(input$asr_push_to_hub),
- hub_user_id = as.character(input$asr_hub_user_id),
- hub_private_repo = as.character(input$asr_hub_private_repo),
- log_to_wandb = as.character(input$asr_log_to_wandb),
- wandb_project = as.character(input$asr_wandb_project),
- wandb_entity = as.character(input$asr_wandb_entity),
- seed = as.character(input$asr_seed),
- num_proc = as.character(input$asr_num_proc),
- max_train_hours = max_hours
- )
-
- req_list <- req_list[!sapply(req_list, is.null)]
-
- req <- request(paste0(api_url, "/train/asr")) %>%
- req_body_multipart(!!!req_list)
-
- resp <- req_perform(req)
- resp_data <- resp_body_json(resp)
- active_job_id(resp_data$job_id)
- polled_data(list(status = "Queued", task = "ASR", log = "Job is queued."))
-
- }, error = function(e) {
- error_message <- as.character(e$message)
- if(!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
- polled_data(list(status = "Error", task = "ASR", log = error_message))
- })
- })
-
- # --- 4.3: Image Classification Job ---
- observeEvent(input$start_img_class_job, {
- req(input$img_class_dataset_id, input$img_class_model_checkpoint)
- reset_live_training_ui("Image Classification")
-
- tryCatch({
- req <- request(paste0(api_url, "/train/image-classification")) %>%
- req_body_multipart(
- dataset_id = as.character(input$img_class_dataset_id),
- model_checkpoint = as.character(input$img_class_model_checkpoint),
- run_name = as.character(input$img_class_run_name),
- version = as.character(input$img_class_version),
- epochs = as.character(input$img_class_epochs),
- learning_rate = as.character(input$img_class_learning_rate),
- weight_decay = as.character(input$img_class_weight_decay),
- train_batch_size = as.character(input$img_class_train_batch_size),
- eval_batch_size = as.character(input$img_class_eval_batch_size),
- max_image_size = as.character(input$img_class_max_image_size),
- gradient_accumulation_steps = as.character(input$img_class_grad_accum),
- gradient_checkpointing = as.character(input$img_class_grad_check),
- fp16 = as.character(input$img_class_fp16),
- seed = as.character(input$img_class_seed),
- early_stopping_patience = as.character(input$img_class_early_stop),
- push_to_hub = as.character(input$img_class_push_to_hub),
- hub_user_id = as.character(input$img_class_hub_user_id),
- log_to_wandb = as.character(input$img_class_log_to_wandb),
- wandb_project = as.character(input$img_class_wandb_project),
- wandb_entity = as.character(input$img_class_wandb_entity),
- num_proc = as.character(input$img_class_num_proc),
- is_presplit = as.character(input$img_class_is_presplit),
- train_ratio = as.character(input$img_class_train_ratio),
- dev_ratio = as.character(input$img_class_dev_ratio)
- )
-
- resp <- req_perform(req)
- active_job_id(resp_body_json(resp)$job_id)
- }, error = function(e) {
- polled_data(list(status = "Error", task = "Image Classification", log = as.character(e)))
- })
- })
+ bind_rows(metrics_list) %>%
+ filter(!is.na(epoch)) %>%
+ filter(if_any(everything(), ~ !is.na(.))) %>%
+ select(starts_with("eval_"), epoch) %>%
+ arrange(epoch) %>%
+ distinct(epoch, .keep_all = TRUE)
+ })
+
+ output$metric_selector_ui <- renderUI({
+ df <- tryCatch(metrics_for_plotting(), error = function(e) NULL)
- # --- 4.4: Image Segmentation Job ---
- observeEvent(input$start_seg_job, {
- req(input$seg_dataset_id, input$seg_model_checkpoint)
- reset_live_training_ui("Image Segmentation")
-
- tryCatch({
- req <- request(paste0(api_url, "/train/image-segmentation")) %>%
- req_body_multipart(
- dataset_id = as.character(input$seg_dataset_id),
- model_checkpoint = as.character(input$seg_model_checkpoint),
- run_name = as.character(input$seg_run_name),
- version = as.character(input$seg_version),
- epochs = as.character(input$seg_epochs),
- learning_rate = as.character(input$seg_learning_rate),
- weight_decay = as.character(input$seg_weight_decay),
- train_batch_size = as.character(input$seg_train_batch_size),
- eval_batch_size = as.character(input$seg_eval_batch_size),
- max_image_size = as.character(input$seg_max_image_size),
- gradient_accumulation_steps = as.character(input$seg_grad_accum),
- gradient_checkpointing = as.character(input$seg_grad_check),
- fp16 = as.character(input$seg_fp16),
- seed = as.character(input$seg_seed),
- early_stopping_patience = as.character(input$seg_early_stop),
- push_to_hub = as.character(input$seg_push_to_hub),
- hub_user_id = as.character(input$seg_hub_user_id),
- log_to_wandb = as.character(input$seg_log_to_wandb),
- wandb_project = as.character(input$seg_wandb_project),
- wandb_entity = as.character(input$seg_wandb_entity),
- num_proc = as.character(input$seg_num_proc),
- is_presplit = as.character(input$seg_is_presplit),
- train_ratio = as.character(input$seg_train_ratio),
- dev_ratio = as.character(input$seg_dev_ratio)
- )
-
- resp <- req_perform(req)
- active_job_id(resp_body_json(resp)$job_id)
- }, error = function(e) {
- error_message <- as.character(e$message)
- if(!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
- polled_data(list(status = "Error", task = "Image Segmentation", log = error_message))
- })
- })
-
-
- # ==============================================================================
- # == 5. "LIVE TRAINING" TAB LOGIC
- # ==============================================================================
-
- # --- 5.1: Job Polling & Status Display ---
- observe({
- job_id <- active_job_id()
- current_status <- polled_data()$status
-
- # Only poll if we have an active job that isn't finished
- if (!is.null(job_id) && !(current_status %in% c("completed", "failed", "Error", "Polling Error"))) {
- invalidateLater(2000, session)
-
- # Poll for Status & Log
- tryCatch({
- req_status <- request(paste0(api_url, "/status/", job_id))
- resp_status <- req_perform(req_status)
- if (resp_status(resp_status) == 200) {
- polled_data(resp_body_json(resp_status))
- }
- }, error = function(e) {
- current_data <- polled_data()
- current_data$status <- "Polling Error"
- polled_data(current_data)
- })
-
- # Poll for Metrics
- tryCatch({
- req_metrics <- request(paste0(api_url, "/metrics/", job_id))
- resp_metrics <- req_perform(req_metrics)
- if (resp_status(resp_metrics) == 200) {
- polled_metrics(resp_body_json(resp_metrics))
- }
- }, error = function(e) {
- polled_metrics(NULL)
- })
- }
- })
+ if (is.null(df) || nrow(df) == 0) {
+ return(p("Waiting for first evaluation epoch to complete..."))
+ }
- # --- 5.2: Job Status Panel Outputs ---
- output$job_task_display <- renderText({ polled_data()$task })
- output$job_id_display <- renderText({ ifelse(is.null(active_job_id()), "None", active_job_id()) })
- output$job_status_display <- renderText({ polled_data()$status })
+ metric_names <- names(df)[sapply(df, is.numeric) & !names(df) %in% c("epoch", "step", "runtime", "samples_per_second", "steps_per_second")]
- # --- 5.3: Metrics Table Panel Output ---
- output$eval_table <- renderDT({
- metrics_list <- polled_metrics()
- req(metrics_list, length(metrics_list) > 0)
-
- metrics_df <- bind_rows(metrics_list)
-
- if (nrow(metrics_df) > 0) {
- display_df <- metrics_df %>%
- pivot_longer(
- cols = starts_with("eval_") | starts_with("test_"),
- names_to = "metric_name",
- values_to = "value",
- values_drop_na = TRUE
- ) %>%
- separate(metric_name, into = c("step", "metric"), sep = "_", extra = "merge") %>%
- pivot_wider(
- names_from = metric,
- values_from = value,
- values_fn = first
- ) %>%
- select(any_of(c("step", "epoch", "loss", "map", "wer", "cer", "mean_iou")), everything())
-
- datatable(
- display_df,
- options = list(
- pageLength = 5,
- scrollX = TRUE,
- searching = FALSE,
- autoWidth = TRUE,
- class = 'cell-border stripe'
- ),
- rownames = FALSE
- )
- } else {
- return(NULL)
- }
- })
-
- # --- 5.4: Full Log Panel Output ---
- output$log_output <- renderText({
- log_text <- polled_data()$log
- if (is.null(log_text) || nchar(log_text) == 0) {
- return("No log output...")
- }
- return(log_text)
- })
-
- # --- 5.5: Plotting Logic for "Live Training" Tab ---
- metrics_for_plotting <- reactive({
- metrics_list <- polled_metrics()
- req(metrics_list, length(metrics_list) > 0)
-
- bind_rows(metrics_list) %>%
- filter(!is.na(epoch)) %>%
- filter(if_any(everything(), ~ !is.na(.))) %>%
- select(starts_with("eval_"), epoch) %>%
- arrange(epoch) %>%
- distinct(epoch, .keep_all = TRUE)
- })
+ default_metric <- "eval_loss"
+ if (!"eval_loss" %in% metric_names && length(metric_names) > 0) {
+ default_metric <- metric_names[1]
+ }
- output$metric_selector_ui <- renderUI({
- df <- tryCatch(metrics_for_plotting(), error = function(e) NULL)
-
- if (is.null(df) || nrow(df) == 0) {
- return(p("Waiting for first evaluation epoch to complete..."))
- }
-
- metric_names <- names(df)[sapply(df, is.numeric) & !names(df) %in% c("epoch", "step", "runtime", "samples_per_second", "steps_per_second")]
-
- default_metric <- "eval_loss"
- if (!"eval_loss" %in% metric_names && length(metric_names) > 0) {
- default_metric <- metric_names[1]
- }
-
- selected_val <- input$selected_metric
- if (is.null(selected_val) || !selected_val %in% metric_names) {
- selected_val <- default_metric
- }
-
- selectInput("selected_metric", "Select Metric to Plot:",
- choices = metric_names,
- selected = selected_val)
- })
+ selected_val <- input$selected_metric
+ if (is.null(selected_val) || !selected_val %in% metric_names) {
+ selected_val <- default_metric
+ }
- output$dynamic_metric_plot <- renderDygraph({
- df <- tryCatch(metrics_for_plotting(), error = function(e) NULL)
-
- if (is.null(df) || nrow(df) == 0) {
- return(dygraph(data.frame(x=c(0), y=c(0)), main = "Waiting for Epoch 1") %>%
+ selectInput("selected_metric", "Select Metric to Plot:",
+ choices = metric_names,
+ selected = selected_val)
+ })
+
+ output$dynamic_metric_plot <- renderDygraph({
+ df <- tryCatch(metrics_for_plotting(), error = function(e) NULL)
+
+ if (is.null(df) || nrow(df) == 0) {
+ return(dygraph(data.frame(x=c(0), y=c(0)), main = "Waiting for Epoch 1") %>%
dyOptions(drawGrid = FALSE, drawAxes = FALSE, drawYAxis = FALSE, drawXAxis = FALSE) %>%
dyAxis("x", label = "Epoch"))
- }
-
- req(input$selected_metric)
- req(input$selected_metric %in% names(df))
-
- metric_to_plot <- input$selected_metric
-
- if (!"epoch" %in% names(df) || !metric_to_plot %in% names(df)) {
- return(dygraph(data.frame(x=c(0), y=c(0)), main = "Metric data not yet available") %>%
+ }
+
+ req(input$selected_metric)
+ req(input$selected_metric %in% names(df))
+
+ metric_to_plot <- input$selected_metric
+
+ if (!"epoch" %in% names(df) || !metric_to_plot %in% names(df)) {
+ return(dygraph(data.frame(x=c(0), y=c(0)), main = "Metric data not yet available") %>%
dyOptions(drawGrid = FALSE, drawAxes = FALSE, drawYAxis = FALSE, drawXAxis = FALSE))
- }
-
- plot_data <- df[, c("epoch", metric_to_plot)]
-
- dygraph(plot_data, main = paste(metric_to_plot, "vs. Epoch")) %>%
- dySeries(metric_to_plot, label = metric_to_plot) %>%
- dyAxis("x", label = "Epoch", valueRange = c(0, max(df$epoch, na.rm = TRUE) + 1)) %>%
- dyRangeSelector() %>%
- dyOptions(stackedGraph = FALSE,
- fillGraph = FALSE,
- stepPlot = FALSE,
- drawPoints = TRUE,
- pointSize = 4) %>%
- dyLegend(show = "always", width = 200)
- })
+ }
+ plot_data <- df[, c("epoch", metric_to_plot)]
- # ==============================================================================
- # == 6. "TRAINING HISTORY" TAB LOGIC
- # ==============================================================================
-
- # --- 6.1: Populate the Job Selector Dropdown (with Filters) ---
- observe({
- # React to tab switching, job completion, and filter changes
- input$main_tabs
- polled_data()
- input$history_task_filter
- input$history_status_filter
-
- tryCatch({
-
- query_params <- list()
- if (input$history_task_filter != "all") {
- query_params$task_type <- input$history_task_filter
- }
- if (input$history_status_filter != "all") {
- query_params$status <- input$history_status_filter
- }
-
- req <- request(paste0(api_url, "/jobs/list"))
- if (length(query_params) > 0) {
- req <- req_url_query(req, !!!query_params)
- }
-
- resp <- req_perform(req)
- jobs_list_raw <- resp_body_json(resp, simplifyVector = FALSE)
-
- if (length(jobs_list_raw) > 0) {
-
- jobs_df <- bind_rows(lapply(jobs_list_raw, function(job) {
- data.frame(
- id = job$id,
- task_type = job$task_type,
- status = job$status,
- run_name = ifelse(is.null(job$details$run_name), "N/A", job$details$run_name)
- )
- }))
-
- history_jobs_df(jobs_df)
-
- job_names <- paste0(
- jobs_df$run_name,
- " (", jobs_df$task_type, " | ID: ", substr(jobs_df$id, 1, 8), ") - ",
- jobs_df$status
- )
- job_choices <- setNames(jobs_df$id, job_names)
-
- updateSelectInput(session, "history_job_selector", choices = job_choices)
-
- } else {
- updateSelectInput(session, "history_job_selector", choices = c("No jobs found" = ""))
- history_jobs_df(NULL)
- }
- }, error = function(e) {
- updateSelectInput(session, "history_job_selector", choices = c("Error loading jobs" = ""))
- history_jobs_df(NULL)
- })
- })
-
- # --- 6.2: Fetch Metrics when user selects a historical job ---
- observeEvent(input$history_job_selector, {
- job_id <- input$history_job_selector
-
- history_poller_active(FALSE) # Deactivate poller by default
-
- if (!is.null(job_id) && nchar(job_id) > 0) {
- tryCatch({
- req <- request(paste0(api_url, "/metrics/", job_id))
- resp <- req_perform(req)
- history_metrics(resp_body_json(resp))
- }, error = function(e) {
- history_metrics(NULL)
- })
-
- # --- Poller Activation Logic ---
- req(history_jobs_df())
- job_info <- history_jobs_df() %>% filter(id == job_id)
- if (nrow(job_info) > 0 && job_info$status == "running") {
- print(paste("Activating poller for running job:", job_id))
- history_poller_active(TRUE) # Activate poller
- }
- # --- End Poller Logic ---
-
- } else {
- # If job_id is "" or NULL (e.g., "No jobs found"), clear the metrics.
- history_metrics(NULL)
- }
- })
-
- # --- 6.3: Reactive for Historical Plot Data ---
- history_metrics_for_plotting <- reactive({
- metrics_list <- history_metrics()
- req(metrics_list, length(metrics_list) > 0)
-
- bind_rows(metrics_list) %>%
- filter(!is.na(epoch)) %>% # <--- THIS IS THE FIX
- filter(if_any(everything(), ~ !is.na(.))) %>%
- select(starts_with("eval_"), epoch) %>%
- arrange(epoch) %>%
- distinct(epoch, .keep_all = TRUE)
- })
-
- # --- 6.4: Render Historical Metrics Table ---
- output$history_eval_table <- renderDT({
- metrics_list <- history_metrics()
- req(metrics_list, length(metrics_list) > 0)
-
- metrics_df <- bind_rows(metrics_list)
-
- if (nrow(metrics_df) > 0) {
- display_df <- metrics_df %>%
- pivot_longer(
- cols = starts_with("eval_") | starts_with("test_"),
- names_to = "metric_name",
- values_to = "value",
- values_drop_na = TRUE
- ) %>%
- separate(metric_name, into = c("step", "metric"), sep = "_", extra = "merge") %>%
- pivot_wider(
- names_from = metric,
- values_from = value,
- values_fn = first
- ) %>%
- select(any_of(c("step", "epoch", "loss", "map", "wer", "cer", "mean_iou")), everything())
-
- datatable(
- display_df,
- options = list(pageLength = 5, scrollX = TRUE, searching = FALSE, autoWidth = TRUE),
- rownames = FALSE
- )
- } else {
- return(NULL)
- }
- })
-
- # --- 6.5: Render Historical Plot UI (Dropdown) ---
- output$history_metric_selector_ui <- renderUI({
- df <- tryCatch(history_metrics_for_plotting(), error = function(e) NULL)
-
- if (is.null(df) || nrow(df) == 0) {
- return(p("No evaluation metrics found for this job."))
- }
-
- metric_names <- names(df)[sapply(df, is.numeric) & !names(df) %in% c("epoch", "step", "runtime", "samples_per_second", "steps_per_second")]
-
- default_metric <- "eval_loss"
- if (!"eval_loss" %in% metric_names && length(metric_names) > 0) {
- default_metric <- metric_names[1]
- }
-
- selected_val <- input$history_selected_metric
- if (is.null(selected_val) || !selected_val %in% metric_names) {
- selected_val <- default_metric
- }
-
- selectInput("history_selected_metric", "Select Metric to Plot:",
- choices = metric_names,
- selected = selected_val)
- })
-
- # --- 6.6: Render Historical Plot ---
- output$history_metric_plot <- renderDygraph({
- df <- tryCatch(history_metrics_for_plotting(), error = function(e) NULL)
-
- if (is.null(df) || nrow(df) == 0) {
- return(dygraph(data.frame(x=c(0), y=c(0)), main = "No Metric Data") %>%
- dyOptions(drawGrid = FALSE, drawAxes = FALSE, drawYAxis = FALSE, drawXAxis = FALSE) %>%
- dyAxis("x", label = "Epoch"))
- }
-
- req(input$history_selected_metric)
- req(input$history_selected_metric %in% names(df))
+ dygraph(plot_data, main = paste(metric_to_plot, "vs. Epoch")) %>%
+ dySeries(metric_to_plot, label = metric_to_plot) %>%
+ dyAxis("x", label = "Epoch", valueRange = c(0, max(df$epoch, na.rm = TRUE) + 1)) %>%
+ dyRangeSelector() %>%
+ dyOptions(stackedGraph = FALSE,
+ fillGraph = FALSE,
+ stepPlot = FALSE,
+ drawPoints = TRUE,
+ pointSize = 4) %>%
+ dyLegend(show = "always", width = 200)
+ })
+
+
+ # ==============================================================================
+ # == 6. "TRAINING HISTORY" TAB LOGIC
+ # ==============================================================================
+
+ # --- 6.1: Populate the Job Selector Dropdown (with Filters) ---
+ observe({
+ # React to tab switching, job completion, and filter changes
+ input$main_tabs
+ polled_data()
+ input$history_task_filter
+ input$history_status_filter
+
+ tryCatch({
+
+ query_params <- list()
+ if (input$history_task_filter != "all") {
+ query_params$task_type <- input$history_task_filter
+ }
+ if (input$history_status_filter != "all") {
+ query_params$status <- input$history_status_filter
+ }
+
+ req <- request(paste0(api_url, "/jobs/list"))
+ if (length(query_params) > 0) {
+ req <- req_url_query(req, !!!query_params)
+ }
+
+ resp <- req_perform(req)
+ jobs_list_raw <- resp_body_json(resp, simplifyVector = FALSE)
+
+ if (length(jobs_list_raw) > 0) {
- metric_to_plot <- input$history_selected_metric
+ jobs_df <- bind_rows(lapply(jobs_list_raw, function(job) {
+ data.frame(
+ id = job$id,
+ task_type = job$task_type,
+ status = job$status,
+ run_name = ifelse(is.null(job$details$run_name), "N/A", job$details$run_name)
+ )
+ }))
- if (!"epoch" %in% names(df) || !metric_to_plot %in% names(df)) {
- return(dygraph(data.frame(x=c(0), y=c(0)), main = "Metric data not yet available") %>%
- dyOptions(drawGrid = FALSE, drawAxes = FALSE, drawYAxis = FALSE, drawXAxis = FALSE))
- }
-
- plot_data <- df[, c("epoch", metric_to_plot)]
+ history_jobs_df(jobs_df)
- dygraph(plot_data, main = paste(metric_to_plot, "vs. Epoch")) %>%
- dySeries(metric_to_plot, label = metric_to_plot) %>%
- dyAxis("x", label = "Epoch", valueRange = c(0, max(df$epoch, na.rm = TRUE) + 1)) %>%
- dyRangeSelector() %>%
- dyOptions(stackedGraph = FALSE,
- fillGraph = FALSE,
- stepPlot = FALSE,
- drawPoints = TRUE,
- pointSize = 4) %>%
- dyLegend(show = "always", width = 200)
- })
-
- # --- 6.7: Poller for selected running job in History tab ---
- observe({
- # Only run if:
- req(
- history_poller_active() == TRUE,
- input$main_tabs == "Training History",
- !is.null(input$history_job_selector),
- nchar(input$history_job_selector) > 0
+ job_names <- paste0(
+ jobs_df$run_name,
+ " (", jobs_df$task_type, " | ID: ", substr(jobs_df$id, 1, 8), ") - ",
+ jobs_df$status
)
+ job_choices <- setNames(jobs_df$id, job_names)
- # Poll every 3 seconds
- invalidateLater(3000, session)
-
- job_id <- input$history_job_selector
- print(paste("History Poller: Fetching metrics for", job_id))
+ updateSelectInput(session, "history_job_selector", choices = job_choices)
- tryCatch({
- req <- request(paste0(api_url, "/metrics/", job_id))
- resp <- req_perform(req)
- if (resp_status(resp) == 200) {
- history_metrics(resp_body_json(resp))
- }
-
- # Check if job is still running
- req_status <- request(paste0(api_url, "/status/", job_id))
- resp_status <- req_perform(req_status)
- if (resp_status(resp_status) == 200) {
- status_data <- resp_body_json(resp_status)
- if (status_data$status != "running") {
- print(paste("History Poller: Job", job_id, "is no longer running. Deactivating poller."))
- history_poller_active(FALSE)
- # Refresh the job list dropdown to show "completed"
- observeEvent(model_registry(), {
- req(model_registry())
- refresh_data_trigger(refresh_data_trigger() + 1)
- }, once = TRUE)
- }
- }
-
- }, error = function(e) {
- print(paste("History Poller Error:", e$message))
- history_poller_active(FALSE) # Stop polling on error
- })
+ } else {
+ updateSelectInput(session, "history_job_selector", choices = c("No jobs found" = ""))
+ history_jobs_df(NULL)
+ }
+ }, error = function(e) {
+ updateSelectInput(session, "history_job_selector", choices = c("Error loading jobs" = ""))
+ history_jobs_df(NULL)
})
+ })
+
+ # --- 6.2: Fetch Metrics when user selects a historical job ---
+ observeEvent(input$history_job_selector, {
+ job_id <- input$history_job_selector
+ history_poller_active(FALSE) # Deactivate poller by default
- # ==============================================================================
- # == 7. "INFERENCE" TAB LOGIC
- # ==============================================================================
+ if (!is.null(job_id) && nchar(job_id) > 0) {
+ tryCatch({
+ req <- request(paste0(api_url, "/metrics/", job_id))
+ resp <- req_perform(req)
+ history_metrics(resp_body_json(resp))
+ }, error = function(e) {
+ history_metrics(NULL)
+ })
+
+ # --- Poller Activation Logic ---
+ req(history_jobs_df())
+ job_info <- history_jobs_df() %>% filter(id == job_id)
+ if (nrow(job_info) > 0 && job_info$status == "running") {
+ print(paste("Activating poller for running job:", job_id))
+ history_poller_active(TRUE) # Activate poller
+ }
+ # --- End Poller Logic ---
+
+ } else {
+ # If job_id is "" or NULL (e.g., "No jobs found"), clear the metrics.
+ history_metrics(NULL)
+ }
+ })
+
+ # --- 6.3: Reactive for Historical Plot Data ---
+ history_metrics_for_plotting <- reactive({
+ metrics_list <- history_metrics()
+ req(metrics_list, length(metrics_list) > 0)
- # --- 7.1: Inference Checkpoint Finders ---
- observeEvent(input$infer_run_name, {
- run_name <- input$infer_run_name
- if (nchar(run_name) > 2) {
- tryCatch({
- req <- request(paste0(api_url, "/checkpoints")) %>%
- req_url_query(run_name = run_name, task_type = "object_detection")
- resp <- req_perform(req)
- if (resp_status(resp) == 200) {
- checkpoints <- resp_body_json(resp, simplifyVector = TRUE)
- updateSelectInput(session, "infer_checkpoint_dropdown", choices = checkpoints)
- }
- }, error = function(e) {
- updateSelectInput(session, "infer_checkpoint_dropdown", choices = c("Error finding checkpoints"))
- })
- }
- })
+ bind_rows(metrics_list) %>%
+ filter(!is.na(epoch)) %>% # <--- THIS IS THE FIX
+ filter(if_any(everything(), ~ !is.na(.))) %>%
+ select(starts_with("eval_"), epoch) %>%
+ arrange(epoch) %>%
+ distinct(epoch, .keep_all = TRUE)
+ })
+
+ # --- 6.4: Render Historical Metrics Table ---
+ output$history_eval_table <- renderDT({
+ metrics_list <- history_metrics()
+ req(metrics_list, length(metrics_list) > 0)
- observeEvent(input$infer_asr_run_name, {
- run_name <- input$infer_asr_run_name
- if (nchar(run_name) > 2) {
- tryCatch({
- req <- request(paste0(api_url, "/checkpoints")) %>%
- req_url_query(run_name = run_name, task_type = "asr")
- resp <- req_perform(req)
- if (resp_status(resp) == 200) {
- checkpoints <- resp_body_json(resp, simplifyVector = TRUE)
- updateSelectInput(session, "infer_asr_checkpoint_dropdown", choices = checkpoints)
- }
- }, error = function(e) {
- updateSelectInput(session, "infer_asr_checkpoint_dropdown", choices = c("Error finding checkpoints"))
- })
- }
- })
-
- observeEvent(input$infer_img_class_run_name, {
- run_name <- input$infer_img_class_run_name
- if (nchar(run_name) > 2) {
- tryCatch({
- req <- request(paste0(api_url, "/checkpoints")) %>%
- req_url_query(run_name = run_name, task_type = "image_classification")
- checkpoints <- resp_body_json(req_perform(req), simplifyVector = TRUE)
- updateSelectInput(session, "infer_img_class_checkpoint_dropdown", choices = checkpoints)
- }, error = function(e) { })
- }
- })
+ metrics_df <- bind_rows(metrics_list)
- observeEvent(input$infer_seg_run_name, {
- run_name <- input$infer_seg_run_name
- if (nchar(run_name) > 2) {
- tryCatch({
- req <- request(paste0(api_url, "/checkpoints")) %>%
- req_url_query(run_name = run_name, task_type = "image_segmentation")
- checkpoints <- resp_body_json(req_perform(req), simplifyVector = TRUE)
- updateSelectInput(session, "infer_seg_checkpoint_dropdown", choices = checkpoints)
- }, error = function(e) { })
- }
- })
-
- # --- 7.2: Inference Job Submission (One per task) ---
+ if (nrow(metrics_df) > 0) {
+ display_df <- metrics_df %>%
+ pivot_longer(
+ cols = starts_with("eval_") | starts_with("test_"),
+ names_to = "metric_name",
+ values_to = "value",
+ values_drop_na = TRUE
+ ) %>%
+ separate(metric_name, into = c("step", "metric"), sep = "_", extra = "merge") %>%
+ pivot_wider(
+ names_from = metric,
+ values_from = value,
+ values_fn = first
+ ) %>%
+ select(any_of(c("step", "epoch", "loss", "map", "wer", "cer", "mean_iou")), everything())
+
+ datatable(
+ display_df,
+ options = list(pageLength = 5, scrollX = TRUE, searching = FALSE, autoWidth = TRUE),
+ rownames = FALSE
+ )
+ } else {
+ return(NULL)
+ }
+ })
+
+ # --- 6.5: Render Historical Plot UI (Dropdown) ---
+ output$history_metric_selector_ui <- renderUI({
+ df <- tryCatch(history_metrics_for_plotting(), error = function(e) NULL)
- observeEvent(input$start_obj_inference, {
- req(input$infer_obj_image_upload); req(input$infer_checkpoint_dropdown)
- obj_inference_result(list(status = "Running...", image_url = NULL, error = NULL))
- tryCatch({
- req <- request(paste0(api_url, "/inference/object-detection")) %>%
- req_body_multipart(
- image = curl::form_file(input$infer_obj_image_upload$datapath),
- model_checkpoint = input$infer_checkpoint_dropdown,
- threshold = as.character(input$infer_obj_threshold),
- iou = as.character(input$infer_obj_iou),
- max_det = as.character(input$infer_obj_max_det)
- )
- resp <- req_perform(req)
- resp_data <- resp_body_json(resp)
- obj_inference_result(list(status = "Success", image_url = resp_data$output_url, error = NULL))
- }, error = function(e) {
- error_message <- as.character(e$message)
- if(!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
- obj_inference_result(list(status = "Error", image_url = NULL, error = error_message))
- })
- })
-
- observeEvent(input$start_asr_inference, {
- req(input$infer_asr_audio_upload)
- req(input$infer_asr_checkpoint_dropdown)
- asr_inference_result(list(status = "Running...", transcription = "Processing...", error = NULL))
- tryCatch({
- req <- request(paste0(api_url, "/inference/asr")) %>%
- req_body_multipart(
- audio = curl::form_file(input$infer_asr_audio_upload$datapath),
- model_checkpoint = input$infer_asr_checkpoint_dropdown
- )
- resp <- req_perform(req)
- resp_data <- resp_body_json(resp)
- asr_inference_result(list(status = "Success", transcription = resp_data$transcription, error = NULL))
- }, error = function(e) {
- error_message <- as.character(e$message)
- if(!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
- asr_inference_result(list(status = "Error", transcription = NULL, error = error_message))
- })
- })
+ if (is.null(df) || nrow(df) == 0) {
+ return(p("No evaluation metrics found for this job."))
+ }
- observeEvent(input$start_img_class_inference, {
- req(input$infer_img_class_upload, input$infer_img_class_checkpoint_dropdown)
- img_class_inference_result(list(status = "Running...", prediction = "Processing...", error = NULL))
- tryCatch({
- req <- request(paste0(api_url, "/inference/image-classification")) %>%
- req_body_multipart(
- image = curl::form_file(input$infer_img_class_upload$datapath),
- model_checkpoint = input$infer_img_class_checkpoint_dropdown
- )
- resp_data <- resp_body_json(req_perform(req))
- img_class_inference_result(list(status = "Success", prediction = resp_data$prediction, error = NULL))
- }, error = function(e) {
- img_class_inference_result(list(status = "Error", prediction = NULL, error = as.character(e)))
- })
- })
+ metric_names <- names(df)[sapply(df, is.numeric) & !names(df) %in% c("epoch", "step", "runtime", "samples_per_second", "steps_per_second")]
- observeEvent(input$start_seg_inference, {
- req(input$infer_seg_image_upload); req(input$infer_seg_checkpoint_dropdown)
- seg_inference_result(list(status = "Running...", image_url = NULL, error = NULL))
- tryCatch({
- req <- request(paste0(api_url, "/inference/image-segmentation")) %>%
- req_body_multipart(
- image = curl::form_file(input$infer_seg_image_upload$datapath),
- model_checkpoint = input$infer_seg_checkpoint_dropdown
- )
- resp <- req_perform(req)
- resp_data <- resp_body_json(resp)
- seg_inference_result(list(status = "Success", image_url = resp_data$output_url, error = NULL))
- }, error = function(e) {
- error_message <- as.character(e$message)
- if(!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
- seg_inference_result(list(status = "Error", image_url = NULL, error = error_message))
- })
- })
+ default_metric <- "eval_loss"
+ if (!"eval_loss" %in% metric_names && length(metric_names) > 0) {
+ default_metric <- metric_names[1]
+ }
+
+ selected_val <- input$history_selected_metric
+ if (is.null(selected_val) || !selected_val %in% metric_names) {
+ selected_val <- default_metric
+ }
+
+ selectInput("history_selected_metric", "Select Metric to Plot:",
+ choices = metric_names,
+ selected = selected_val)
+ })
+
+ # --- 6.6: Render Historical Plot ---
+ output$history_metric_plot <- renderDygraph({
+ df <- tryCatch(history_metrics_for_plotting(), error = function(e) NULL)
+
+ if (is.null(df) || nrow(df) == 0) {
+ return(dygraph(data.frame(x=c(0), y=c(0)), main = "No Metric Data") %>%
+ dyOptions(drawGrid = FALSE, drawAxes = FALSE, drawYAxis = FALSE, drawXAxis = FALSE) %>%
+ dyAxis("x", label = "Epoch"))
+ }
- # --- 7.3: Inference UI Outputs ---
+ req(input$history_selected_metric)
+ req(input$history_selected_metric %in% names(df))
- output$inference_status_ui <- renderUI({
- res <- obj_inference_result()
- if (res$status == "Running...") {
- tags$div(class = "alert alert-info", "Running inference...")
- } else if (res$status == "Error") {
- tags$div(class = "alert alert-danger", HTML(paste("Error:", res$error)))
+ metric_to_plot <- input$history_selected_metric
+
+ if (!"epoch" %in% names(df) || !metric_to_plot %in% names(df)) {
+ return(dygraph(data.frame(x=c(0), y=c(0)), main = "Metric data not yet available") %>%
+ dyOptions(drawGrid = FALSE, drawAxes = FALSE, drawYAxis = FALSE, drawXAxis = FALSE))
+ }
+
+ plot_data <- df[, c("epoch", metric_to_plot)]
+
+ dygraph(plot_data, main = paste(metric_to_plot, "vs. Epoch")) %>%
+ dySeries(metric_to_plot, label = metric_to_plot) %>%
+ dyAxis("x", label = "Epoch", valueRange = c(0, max(df$epoch, na.rm = TRUE) + 1)) %>%
+ dyRangeSelector() %>%
+ dyOptions(stackedGraph = FALSE,
+ fillGraph = FALSE,
+ stepPlot = FALSE,
+ drawPoints = TRUE,
+ pointSize = 4) %>%
+ dyLegend(show = "always", width = 200)
+ })
+
+ # --- 6.7: Poller for selected running job in History tab ---
+ observe({
+ # Only run if:
+ req(
+ history_poller_active() == TRUE,
+ input$main_tabs == "Training History",
+ !is.null(input$history_job_selector),
+ nchar(input$history_job_selector) > 0
+ )
+
+ # Poll every 3 seconds
+ invalidateLater(3000, session)
+
+ job_id <- input$history_job_selector
+ print(paste("History Poller: Fetching metrics for", job_id))
+
+ tryCatch({
+ req <- request(paste0(api_url, "/metrics/", job_id))
+ resp <- req_perform(req)
+ if (resp_status(resp) == 200) {
+ history_metrics(resp_body_json(resp))
+ }
+
+ # Check if job is still running
+ req_status <- request(paste0(api_url, "/status/", job_id))
+      status_resp <- req_perform(req_status)
+      if (resp_status(status_resp) == 200) {
+        status_data <- resp_body_json(status_resp)
+ if (status_data$status != "running") {
+ print(paste("History Poller: Job", job_id, "is no longer running. Deactivating poller."))
+ history_poller_active(FALSE)
+ # Refresh the job list dropdown to show "completed"
+ observeEvent(model_registry(), {
+ req(model_registry())
+ refresh_data_trigger(refresh_data_trigger() + 1)
+ }, once = TRUE)
}
+ }
+
+ }, error = function(e) {
+ print(paste("History Poller Error:", e$message))
+ history_poller_active(FALSE) # Stop polling on error
})
- output$inference_image_output <- renderImage({
- res <- obj_inference_result()
- req(res$status == "Success", res$image_url)
- image_url <- paste0(api_url, res$image_url)
- temp_file <- tempfile(fileext = ".jpg")
- download.file(image_url, temp_file, mode = "wb")
- list(src = temp_file, contentType = 'image/jpeg', alt = "Inference Result")
- }, deleteFile = TRUE)
-
- output$asr_inference_status_ui <- renderUI({
- res <- asr_inference_result()
- if (res$status == "Running...") {
- tags$div(class = "alert alert-info", "Running inference...")
- } else if (res$status == "Error") {
- tags$div(class = "alert alert-danger", HTML(paste("Error:", res$error)))
+ })
+
+
+ # ==============================================================================
+ # == 7. "INFERENCE" TAB LOGIC
+ # ==============================================================================
+
+ # --- 7.1: Inference Checkpoint Finders ---
+ observeEvent(input$infer_run_name, {
+ run_name <- input$infer_run_name
+ if (nchar(run_name) > 2) {
+ tryCatch({
+ req <- request(paste0(api_url, "/checkpoints")) %>%
+ req_url_query(run_name = run_name, task_type = "object_detection")
+ resp <- req_perform(req)
+ if (resp_status(resp) == 200) {
+ checkpoints <- resp_body_json(resp, simplifyVector = TRUE)
+ updateSelectInput(session, "infer_checkpoint_dropdown", choices = checkpoints)
}
- })
- output$asr_transcription_output <- renderText({
- res <- asr_inference_result()
- if (is.null(res$transcription)) {
- "Upload an audio file and click 'Run Inference' to see the transcription here."
- } else {
- res$transcription
+ }, error = function(e) {
+ updateSelectInput(session, "infer_checkpoint_dropdown", choices = c("Error finding checkpoints"))
+ })
+ }
+ })
+
+ observeEvent(input$infer_asr_run_name, {
+ run_name <- input$infer_asr_run_name
+ if (nchar(run_name) > 2) {
+ tryCatch({
+ req <- request(paste0(api_url, "/checkpoints")) %>%
+ req_url_query(run_name = run_name, task_type = "asr")
+ resp <- req_perform(req)
+ if (resp_status(resp) == 200) {
+ checkpoints <- resp_body_json(resp, simplifyVector = TRUE)
+ updateSelectInput(session, "infer_asr_checkpoint_dropdown", choices = checkpoints)
}
+ }, error = function(e) {
+ updateSelectInput(session, "infer_asr_checkpoint_dropdown", choices = c("Error finding checkpoints"))
+ })
+ }
+ })
+
+ observeEvent(input$infer_img_class_run_name, {
+ run_name <- input$infer_img_class_run_name
+ if (nchar(run_name) > 2) {
+ tryCatch({
+ req <- request(paste0(api_url, "/checkpoints")) %>%
+ req_url_query(run_name = run_name, task_type = "image_classification")
+ checkpoints <- resp_body_json(req_perform(req), simplifyVector = TRUE)
+ updateSelectInput(session, "infer_img_class_checkpoint_dropdown", choices = checkpoints)
+ }, error = function(e) { })
+ }
+ })
+
+ observeEvent(input$infer_seg_run_name, {
+ run_name <- input$infer_seg_run_name
+ if (nchar(run_name) > 2) {
+ tryCatch({
+ req <- request(paste0(api_url, "/checkpoints")) %>%
+ req_url_query(run_name = run_name, task_type = "image_segmentation")
+ checkpoints <- resp_body_json(req_perform(req), simplifyVector = TRUE)
+ updateSelectInput(session, "infer_seg_checkpoint_dropdown", choices = checkpoints)
+ }, error = function(e) { })
+ }
+ })
+
+ # --- 7.2: Inference Job Submission (One per task) ---
+
+ observeEvent(input$start_obj_inference, {
+ req(input$infer_obj_image_upload); req(input$infer_checkpoint_dropdown)
+ obj_inference_result(list(status = "Running...", image_url = NULL, error = NULL))
+ tryCatch({
+ req <- request(paste0(api_url, "/inference/object-detection")) %>%
+ req_body_multipart(
+ image = curl::form_file(input$infer_obj_image_upload$datapath),
+ model_checkpoint = input$infer_checkpoint_dropdown,
+ threshold = as.character(input$infer_obj_threshold),
+ iou = as.character(input$infer_obj_iou),
+ max_det = as.character(input$infer_obj_max_det)
+ )
+ resp <- req_perform(req)
+ resp_data <- resp_body_json(resp)
+ obj_inference_result(list(status = "Success", image_url = resp_data$output_url, error = NULL))
+ }, error = function(e) {
+ error_message <- as.character(e$message)
+ if(!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
+ obj_inference_result(list(status = "Error", image_url = NULL, error = error_message))
})
-
- output$img_class_inference_status_ui <- renderUI({
- res <- img_class_inference_result()
- if (res$status == "Running...") tags$div(class = "alert alert-info", "Running inference...")
- else if (res$status == "Error") tags$div(class = "alert alert-danger", HTML(paste("Error:", res$error)))
+ })
+
+ observeEvent(input$start_asr_inference, {
+ req(input$infer_asr_audio_upload)
+ req(input$infer_asr_checkpoint_dropdown)
+ asr_inference_result(list(status = "Running...", transcription = "Processing...", error = NULL))
+ tryCatch({
+ req <- request(paste0(api_url, "/inference/asr")) %>%
+ req_body_multipart(
+ audio = curl::form_file(input$infer_asr_audio_upload$datapath),
+ model_checkpoint = input$infer_asr_checkpoint_dropdown
+ )
+ resp <- req_perform(req)
+ resp_data <- resp_body_json(resp)
+ asr_inference_result(list(status = "Success", transcription = resp_data$transcription, error = NULL))
+ }, error = function(e) {
+ error_message <- as.character(e$message)
+ if(!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
+ asr_inference_result(list(status = "Error", transcription = NULL, error = error_message))
})
- output$img_class_prediction_output <- renderText({
- img_class_inference_result()$prediction
+ })
+
+ observeEvent(input$start_img_class_inference, {
+ req(input$infer_img_class_upload, input$infer_img_class_checkpoint_dropdown)
+ img_class_inference_result(list(status = "Running...", prediction = "Processing...", error = NULL))
+ tryCatch({
+ req <- request(paste0(api_url, "/inference/image-classification")) %>%
+ req_body_multipart(
+ image = curl::form_file(input$infer_img_class_upload$datapath),
+ model_checkpoint = input$infer_img_class_checkpoint_dropdown
+ )
+ resp_data <- resp_body_json(req_perform(req))
+ img_class_inference_result(list(status = "Success", prediction = resp_data$prediction, error = NULL))
+ }, error = function(e) {
+ img_class_inference_result(list(status = "Error", prediction = NULL, error = as.character(e)))
})
-
- output$seg_inference_status_ui <- renderUI({
- res <- seg_inference_result()
- if (res$status == "Running...") {
- tags$div(class = "alert alert-info", "Running inference...")
- } else if (res$status == "Error") {
- tags$div(class = "alert alert-danger", HTML(paste("Error:", res$error)))
- }
+ })
+
+ observeEvent(input$start_seg_inference, {
+ req(input$infer_seg_image_upload); req(input$infer_seg_checkpoint_dropdown)
+ seg_inference_result(list(status = "Running...", image_url = NULL, error = NULL))
+ tryCatch({
+ req <- request(paste0(api_url, "/inference/image-segmentation")) %>%
+ req_body_multipart(
+ image = curl::form_file(input$infer_seg_image_upload$datapath),
+ model_checkpoint = input$infer_seg_checkpoint_dropdown
+ )
+ resp <- req_perform(req)
+ resp_data <- resp_body_json(resp)
+ seg_inference_result(list(status = "Success", image_url = resp_data$output_url, error = NULL))
+ }, error = function(e) {
+ error_message <- as.character(e$message)
+ if(!is.null(e$body)) { error_message <- paste("API Error:", e$body) }
+ seg_inference_result(list(status = "Error", image_url = NULL, error = error_message))
})
- output$seg_inference_image_output <- renderImage({
- res <- seg_inference_result()
- req(res$status == "Success", res$image_url)
- image_url <- paste0(api_url, res$image_url)
- temp_file <- tempfile(fileext = ".jpg")
- download.file(image_url, temp_file, mode = "wb")
- list(src = temp_file, contentType = 'image/jpeg', alt = "Inference Result")
- }, deleteFile = TRUE)
-
+ })
+
+ # --- 7.3: Inference UI Outputs ---
+
+ output$inference_status_ui <- renderUI({
+ res <- obj_inference_result()
+ if (res$status == "Running...") {
+ tags$div(class = "alert alert-info", "Running inference...")
+ } else if (res$status == "Error") {
+ tags$div(class = "alert alert-danger", HTML(paste("Error:", res$error)))
+ }
+ })
+ output$inference_image_output <- renderImage({
+ res <- obj_inference_result()
+ req(res$status == "Success", res$image_url)
+ image_url <- paste0(api_url, res$image_url)
+ temp_file <- tempfile(fileext = ".jpg")
+ download.file(image_url, temp_file, mode = "wb")
+ list(src = temp_file, contentType = 'image/jpeg', alt = "Inference Result")
+ }, deleteFile = TRUE)
+
+ output$asr_inference_status_ui <- renderUI({
+ res <- asr_inference_result()
+ if (res$status == "Running...") {
+ tags$div(class = "alert alert-info", "Running inference...")
+ } else if (res$status == "Error") {
+ tags$div(class = "alert alert-danger", HTML(paste("Error:", res$error)))
+ }
+ })
+ output$asr_transcription_output <- renderText({
+ res <- asr_inference_result()
+ if (is.null(res$transcription)) {
+ "Upload an audio file and click 'Run Inference' to see the transcription here."
+ } else {
+ res$transcription
+ }
+ })
+
+ output$img_class_inference_status_ui <- renderUI({
+ res <- img_class_inference_result()
+ if (res$status == "Running...") tags$div(class = "alert alert-info", "Running inference...")
+ else if (res$status == "Error") tags$div(class = "alert alert-danger", HTML(paste("Error:", res$error)))
+ })
+ output$img_class_prediction_output <- renderText({
+ img_class_inference_result()$prediction
+ })
+
+ output$seg_inference_status_ui <- renderUI({
+ res <- seg_inference_result()
+ if (res$status == "Running...") {
+ tags$div(class = "alert alert-info", "Running inference...")
+ } else if (res$status == "Error") {
+ tags$div(class = "alert alert-danger", HTML(paste("Error:", res$error)))
+ }
+ })
+ output$seg_inference_image_output <- renderImage({
+ res <- seg_inference_result()
+ req(res$status == "Success", res$image_url)
+ image_url <- paste0(api_url, res$image_url)
+ temp_file <- tempfile(fileext = ".jpg")
+ download.file(image_url, temp_file, mode = "wb")
+ list(src = temp_file, contentType = 'image/jpeg', alt = "Inference Result")
+ }, deleteFile = TRUE)
+
}
diff --git a/server/header_footer_configs.R b/server/header_footer_configs.R
index 9bc42a7..73b1ef9 100644
--- a/server/header_footer_configs.R
+++ b/server/header_footer_configs.R
@@ -1,25 +1,24 @@
app_title = function() {
- output$app_title = renderUI({
- h3(get_rv_labels("app_title"), style = "text-align: center;")
- })
+ output$app_title = renderUI({
+ h3(get_rv_labels("app_title"), style = "text-align: center;")
+ })
}
#Footer Language convertion
footer_language_translation = function() {
output$app_footer_title = renderUI({
- # h4(get_rv_labels("footer_org_name"))
- h4(get_rv_labels("app_version"))
+ h4(get_rv_labels("footer_org_name"))
})
-
+
output$app_footer_contact = renderUI({
h4(HTML(paste0('',get_rv_labels("footer_contact"), ': example@aphrc.org')))
})
-
+
output$app_footer_all_rights = renderUI({
h4(get_rv_labels("footer_org_name"))
})
-
+
output$app_footer_all_rights = renderUI({
h4(paste0(get_rv_labels("footer_copyright"), " © ", format(Sys.Date(), "%Y"), ", ", get_rv_labels("footer_all_rights")), style = "text-align: right;")
})
@@ -31,85 +30,100 @@ footer_language_translation = function() {
menu_translation = function(){
output$dynamic_meinu_aphrc <- renderMenu({
sidebarMenu(id = "tabs",
- menuItem(text = HTML(paste0("")), tabName = "homePage", icon = icon("house"), selected = TRUE),
- menuItem(text = HTML(paste0("")), tabName = "sourcedata", icon = icon("file-import", lib = "font-awesome")),
- menuItem(
- text = HTML(paste0("")), tabName = "manageData", icon = icon("glyphicon glyphicon-tasks", lib = "glyphicon"),
- menuSubItem(text = HTML(paste0("")), tabName = "Overview", icon = icon("table-columns", lib = "font-awesome")),
- menuSubItem(text = HTML(paste0("")), tabName = "Explore", icon = icon("object-ungroup", lib = "font-awesome")),
- menuSubItem(text = HTML(paste0("")), tabName = "Transform", icon = icon("table-columns", lib = "font-awesome")),
- menuSubItem(text = HTML(paste0("")), tabName = "combineData", icon = icon("table-columns", lib = "font-awesome"))
- ),
- menuItem(text = HTML(paste0("")), tabName = "visualizeData", icon = icon("glyphicon glyphicon-stats", lib = "glyphicon"),
- menuItem(text = HTML(paste0("")), tabName = "summarizeAutomatic", icon = icon("glyphicon glyphicon-stats", lib = "glyphicon")
-
- ),
- menuItem(text = HTML(paste0("")), tabName = "summarizeCustom", icon = icon("chart-line"))),
-
-
- menuItem(text = HTML(paste0("")), tabName = "researchQuestions", icon = icon("file-import", lib = "font-awesome"), selected = FALSE),
-
-
- menuItem(
- text = HTML(paste0("")), tabName = "machineLearning", icon = icon("code-merge", lib = "font-awesome"),
- menuSubItem(text = HTML(paste0("")), tabName = "setupModels", icon = icon("arrows-split-up-and-left", lib = "font-awesome")),
- menuSubItem(text = HTML(paste0("")), tabName = "featureEngineering", icon = icon("sitemap", lib = "font-awesome")),
- menuSubItem(text = HTML(paste0("")), tabName = "trainModel", icon = icon("gear", lib = "font-awesome")),
- menuSubItem(text =HTML(paste0("")), tabName = "validateDeployModel", icon = icon("server", lib = "font-awesome")),
- menuSubItem(text = HTML(paste0("")), tabName = "predictClassify", icon = icon("layer-group", lib = "font-awesome"))
- ),
- menuItem(text = HTML(paste0("")), tabName = "omopAnalysis", icon = icon("magnifying-glass-chart", lib = "font-awesome"),
- menuSubItem(text =HTML(paste0("")), tabName = "evidenceQuality", icon = icon("server", lib = "font-awesome")),
- #menuSubItem(text = HTML(paste0("")), tabName = "cohortConstruction", icon = icon("layer-group", lib = "font-awesome")),
- menuSubItem(text = HTML(paste0("")), tabName = "achilles", icon = icon("glyphicon glyphicon-stats", lib = "glyphicon")),
- menuSubItem(text = HTML(paste0("")), tabName = "omop_visualizations", icon = icon("glyphicon glyphicon-stats", lib = "glyphicon")),
- menuSubItem(text = HTML(paste0("")), tabName = "CohortConstructor", icon = icon("arrows-split-up-and-left", lib = "font-awesome")),
- menuSubItem(text = HTML(paste0("")), tabName = "FeatureExtraction", icon = icon("arrows-split-up-and-left", lib = "font-awesome"))
-
-
- ),
- menuItem(
- HTML(paste0("")),
- tabName = "deeplearning",
- icon = icon("gear"),
-
- # # --- CNN Deep section with its own submenus ---
- # menuItem(
- # HTML(paste0("")),
- # tabName = "cnntransformers",
- # icon = icon("gear", lib = "font-awesome"),
- #
- # menuSubItem(get_rv_labels("dashboard"), tabName = "dashboard", icon = icon("dashboard")),
- # menuSubItem(get_rv_labels("c_pipeline"), tabName = "create", icon = icon("plus-circle")),
- # menuSubItem(get_rv_labels("t_model"), tabName = "train", icon = icon("cog")),
- # menuSubItem(get_rv_labels("m_predictions"), tabName = "predict", icon = icon("eye")),
- # menuSubItem(get_rv_labels("v_jobs"), tabName = "jobs", icon = icon("list")),
- # menuSubItem(get_rv_labels("v_datasets"), tabName = "datasets", icon = icon("database")),
- # menuSubItem(get_rv_labels("d_jobs"), tabName = "delete", icon = icon("trash"))
- #),
-
- # --- Transformers Deep ---
- menuItem(
- HTML(paste0("")),
- tabName = "cnndeep",
- icon = icon("server", lib = "font-awesome")
- )
- )
- ,
-
-
- menuItem(HTML(paste0("")), tabName = "addResources", icon = icon("book")),
- br(),
- div(
- style="display: flex; justify-content: flex-start; gap: 10px; margin-top: 5px;",
- actionBttn("logoutID", label = get_rv_labels("logoutID"), class = "btn")
- #is_logged_in(
- # id = app_login_config$APP_ID,login::logout_button(id = "logout", label = "Sign out"))
- )
-
- )
+ menuItem(text = HTML(paste0("")), tabName = "homePage", icon = icon("house"), selected = TRUE),
+ menuItem(text = HTML(paste0("")), tabName = "sourcedata", icon = icon("file-import", lib = "font-awesome")),
+ menuItem(
+ text = HTML(paste0("")), tabName = "manageData", icon = icon("glyphicon glyphicon-tasks", lib = "glyphicon"),
+ menuSubItem(text = HTML(paste0("")), tabName = "Overview", icon = icon("table-columns", lib = "font-awesome")),
+ menuSubItem(text = HTML(paste0("")), tabName = "Explore", icon = icon("object-ungroup", lib = "font-awesome")),
+ menuSubItem(text = HTML(paste0("")), tabName = "Transform", icon = icon("table-columns", lib = "font-awesome")),
+ menuSubItem(text = HTML(paste0("")), tabName = "combineData", icon = icon("table-columns", lib = "font-awesome"))
+ ),
+ menuItem(text = HTML(paste0("")), tabName = "visualizeData", icon = icon("glyphicon glyphicon-stats", lib = "glyphicon"),
+ menuItem(text = HTML(paste0("")), tabName = "summarizeAutomatic", icon = icon("glyphicon glyphicon-stats", lib = "glyphicon")
+
+ ),
+ menuItem(text = HTML(paste0("")), tabName = "summarizeCustom", icon = icon("chart-line"))),
+
+
+ menuItem(text = HTML(paste0("")), tabName = "researchQuestions", icon = icon("file-import", lib = "font-awesome"), selected = FALSE),
+
+
+ menuItem(
+ text = HTML(paste0("")), tabName = "machineLearning", icon = icon("code-merge", lib = "font-awesome"),
+ menuSubItem(text = HTML(paste0("")), tabName = "setupModels", icon = icon("arrows-split-up-and-left", lib = "font-awesome")),
+ menuSubItem(text = HTML(paste0("")), tabName = "featureEngineering", icon = icon("sitemap", lib = "font-awesome")),
+ menuSubItem(text = HTML(paste0("")), tabName = "trainModel", icon = icon("gear", lib = "font-awesome")),
+ menuSubItem(text =HTML(paste0("")), tabName = "validateDeployModel", icon = icon("server", lib = "font-awesome")),
+ menuSubItem(text = HTML(paste0("")), tabName = "predictClassify", icon = icon("layer-group", lib = "font-awesome"))
+ ),
+ menuItem(text = HTML(paste0("")), tabName = "omopAnalysis", icon = icon("magnifying-glass-chart", lib = "font-awesome"),
+ menuSubItem(text =HTML(paste0("")), tabName = "evidenceQuality", icon = icon("server", lib = "font-awesome")),
+ #menuSubItem(text = HTML(paste0("")), tabName = "cohortConstruction", icon = icon("layer-group", lib = "font-awesome")),
+ menuSubItem(text = HTML(paste0("")), tabName = "achilles", icon = icon("glyphicon glyphicon-stats", lib = "glyphicon")),
+ menuSubItem(text = HTML(paste0("")), tabName = "omop_visualizations", icon = icon("glyphicon glyphicon-stats", lib = "glyphicon")),
+ menuSubItem(text = HTML(paste0("")), tabName = "CohortConstructor", icon = icon("arrows-split-up-and-left", lib = "font-awesome")),
+ menuSubItem(text = HTML(paste0("")), tabName = "FeatureExtraction", icon = icon("arrows-split-up-and-left", lib = "font-awesome"))
+
+
+ ),
+ menuItem(
+ HTML(paste0("")),
+ tabName = "deeplearning",
+ icon = icon("gear"),
+
+ # # --- CNN Deep section with its own submenus ---
+ # menuItem(
+ # HTML(paste0("")),
+ # tabName = "cnntransformers",
+ # icon = icon("gear", lib = "font-awesome"),
+ #
+ # menuSubItem(get_rv_labels("dashboard"), tabName = "dashboard", icon = icon("dashboard")),
+ # menuSubItem(get_rv_labels("c_pipeline"), tabName = "create", icon = icon("plus-circle")),
+ # menuSubItem(get_rv_labels("t_model"), tabName = "train", icon = icon("cog")),
+ # menuSubItem(get_rv_labels("m_predictions"), tabName = "predict", icon = icon("eye")),
+ # menuSubItem(get_rv_labels("v_jobs"), tabName = "jobs", icon = icon("list")),
+ # menuSubItem(get_rv_labels("v_datasets"), tabName = "datasets", icon = icon("database")),
+ # menuSubItem(get_rv_labels("d_jobs"), tabName = "delete", icon = icon("trash"))
+ #),
+
+
+ # --- Transformers Deep ---
+ menuItem(
+ HTML(paste0("")),
+ tabName = "cnndeep",
+ icon = icon("server", lib = "font-awesome")
+ )
+ )
+ ,
+ menuItem(
+ text = HTML(paste0("")),
+ icon = icon("user-shield", lib = "font-awesome"),
+ menuSubItem(
+ text = HTML(paste0("")),
+ tabName = "anonymization_quant",
+ icon = icon("calculator")
+ ),
+ menuSubItem(
+ text = HTML(paste0("")),
+ tabName = "anonymization_qual",
+ icon = icon("align-left")
+ )
+ ),
+
+
+
+ menuItem(HTML(paste0("")), tabName = "addResources", icon = icon("book")),
+ br(),
+ div(
+ style="display: flex; justify-content: flex-start; gap: 10px; margin-top: 5px;",
+ actionBttn("logoutID", label = get_rv_labels("logoutID"), class = "btn")
+ #is_logged_in(
+ # id = app_login_config$APP_ID,login::logout_button(id = "logout", label = "Sign out"))
+ )
+
+ )
})
-}
-
+}
\ No newline at end of file
diff --git a/server/load_libraries.R b/server/load_libraries.R
new file mode 100644
index 0000000..5517f39
--- /dev/null
+++ b/server/load_libraries.R
@@ -0,0 +1,19 @@
+library(shiny)
+library(shinyjs)
+library(sortable) # drag-and-drop QID picker
+library(shinyWidgets) # gauges & widgets
+library(flexdashboard) # gaugeOutput()
+library(shinyAce) # Ace editors
+library(leaflet) # map preview for coordinates
+library(dplyr)
+library(readr)
+library(readxl)
+library(openxlsx)
+library(haven)
+library(shinyjs)
+library(shiny)
+library(digest)
+library(sdcMicro)
+library(rmarkdown)
+library(pagedown)
+library(data.table)
diff --git a/static_files/labelling_file.xlsx b/static_files/labelling_file.xlsx
index cf4ef8d..4d2eabc 100644
Binary files a/static_files/labelling_file.xlsx and b/static_files/labelling_file.xlsx differ
diff --git a/ui.R b/ui.R
index ced999e..7f77de6 100644
--- a/ui.R
+++ b/ui.R
@@ -1,98 +1,158 @@
-#Only UI files and R packages should be included
-#Load R packages
-source(paste0(getwd(), "/ui/load_r_packages.R"))
+.app_root <- normalizePath(getwd(), mustWork = TRUE)
-## Language change utilities
-source("R/shinyutilities.R")
+# =============================================================================
+# Static assets
+# =============================================================================
+.anon_dir <- file.path(.app_root, "WWW", "anon_assets")
-source(paste0(getwd(), "/server/maskedpassinput.R"))
-# U1 Add this line of code to call automl_UI from UI folder
-source("ui/automl_ui.R")
-# Automl controls
-source("ui/automl_controls_ui.R")
-source("ui/train_model_ui.R")
+if (dir.exists(.anon_dir)) {
+ shiny::addResourcePath("anon_assets", .anon_dir)
+}
-# Load UI function before deploy_model_ui()
-source("ui/deploy_model_ui.R")
-#Load Headertag
-source(paste0(getwd(), "/ui/login_credentials.R"))
+# =============================================================================
+# Load packages FIRST (this should load shiny, shinydashboard, shinyjs, waiter, login, etc)
+# =============================================================================
+source(file.path(.app_root, "ui", "load_r_packages.R"))
-source(paste0(getwd(), "/ui/headertag.R"))
-#Load App Theme
-source(paste0(getwd(), "/ui/appTheme.R"))
-#Load Header
-source(paste0(getwd(), "/ui/header.R"))
-#Load Footer
-source(paste0(getwd(), "/ui/footer.R"))
-source(paste0(getwd(), "/ui/homepage.R"))
+# =============================================================================
+# UI helpers / assets
+# =============================================================================
+source(file.path(.app_root, "ui", "sidebar_hover_collapse.R"))
+source(file.path(.app_root, "R", "shinyutilities.R"))
+source(file.path(.app_root, "server", "maskedpassinput.R"))
-source("ui/dashboard_body.R")
+# =============================================================================
+# App UI modules
+# =============================================================================
+source(file.path(.app_root, "ui", "automl_ui.R"))
+source(file.path(.app_root, "ui", "automl_controls_ui.R"))
+source(file.path(.app_root, "ui", "train_model_ui.R"))
+source(file.path(.app_root, "ui", "deploy_model_ui.R"))
+# =============================================================================
+# Login / header / theme / footer / homepage
+# =============================================================================
+source(file.path(.app_root, "ui", "login_credentials.R"))
+source(file.path(.app_root, "ui", "headertag.R"))
+source(file.path(.app_root, "ui", "appTheme.R"))
+source(file.path(.app_root, "ui", "header.R"))
+source(file.path(.app_root, "ui", "footer.R"))
+source(file.path(.app_root, "ui", "homepage.R"))
-#Sidebar
-aphrcSiderbar <- dashboardSidebar(
- width = "20%",
- #menuItemOutput("dynamic_meinu_aphrc"),
- sidebarMenuOutput("dynamic_meinu_aphrc")
- #menuItem("AutoML", tabName = "automl_tab", icon = icon("robot"))
-
+# =============================================================================
+# Anonymization UI (module + tab UIs)
+# =============================================================================
+# NOTE: local=FALSE so module functions exist at global scope
+.mod_quant_path <- file.path(.app_root, "modules", "mod_quant_anonymization.R")
+if (file.exists(.mod_quant_path)) {
+ source(.mod_quant_path, local = FALSE)
+}
+
+.anon_ui_path <- file.path(.app_root, "ui", "anonymization_ui.R")
+if (file.exists(.anon_ui_path)) {
+ source(.anon_ui_path, local = FALSE)
+}
+
+# =============================================================================
+# Dashboard body must be last (it uses functions above)
+# =============================================================================
+source(file.path(.app_root, "ui", "dashboard_body.R"))
+
+# =============================================================================
+# Shinydashboard shell
+# =============================================================================
+
+aphrcSiderbar <- shinydashboard::dashboardSidebar(
+  width = "240px",
+ shinydashboard::sidebarMenuOutput("dynamic_meinu_aphrc")
)
-#Body
-fluidPage(
- useShinyjs(),
- useWaiter(),
-
- waiterShowOnLoad(
- color = "#FFF",
- html = spin_loaders(id = 2, style="width:56px;height:56px;color:#7BC148;"),
- logo= "WWW/aphrc.png"),
+# IMPORTANT:
+# - disable=TRUE removes the header bar entirely (no hamburger)
+# - disable=FALSE keeps header bar + hamburger toggle available
+aphrcHeader <- shinydashboard::dashboardHeader(disable = FALSE)
+
+# =============================================================================
+# UI object (THIS MUST BE NAMED `ui` if using shinyApp(ui=ui,...))
+# =============================================================================
- div(
- id = "auth_wrapper1", # <– will be shown after spinner
- login::is_logged_in(
- id = app_login_config$APP_ID, header
- ),
-
- aphrcHeader <- dashboardHeader(disable = TRUE),
-
- login::is_not_logged_in(
- id = app_login_config$APP_ID,
- div(
- class = "auth-container",
- br(),
- div(
- class = "auth-title text-center",
- tags$img(src = "aphrc.png", height = "80px", style = "margin-bottom: 15px;"),
- h3("Welcome to Nocode Platform")
- ),
- div(
- class = "toggle-buttons",
- actionButton("show_login", "Login", class = "btn btn-outline-success"),
- actionButton("show_signup", "Sign Up", class = "btn btn-outline-success"),
- actionButton("show_reset", "Reset Password", class = "btn btn-outline-success")
- ),
- div(id = "login_form",
- login::login_ui(id = app_login_config$APP_ID)
- ),
- div(id = "signup_form", style = "display:none;",
- login::new_user_ui(id = app_login_config$APP_ID)
- ),
- div(id = "reset_form", style = "display:none;",
- login::reset_password_ui(id = app_login_config$APP_ID)
- )
+ui <- shiny::fluidPage(
+ shinyjs::useShinyjs(),
+ waiter::useWaiter(),
+
+ shiny::tags$head(
+ # anonymization CSS in WWW/anon_assets
+ shiny::tags$link(rel = "stylesheet", type = "text/css", href = "anon_assets/custom.css?v=20260129"),
+ # Font Awesome (needed by anonymization module icons and your hover assets)
+ shiny::tags$link(rel = "stylesheet",
+ href = "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.0/css/all.min.css"),
+
+ # hover-collapse assets (your helper)
+ sidebar_hover_collapse_assets(default_topbar_px = 110)
+ ),
+
+ shiny::div(
+ id = "auth_wrapper1",
+
+ # Logged-in header (your existing `header` object from ui/header.R)
+ login::is_logged_in(
+ id = app_login_config$APP_ID,
+ header
+ ),
+
+ # Not logged-in auth UI
+ login::is_not_logged_in(
+ id = app_login_config$APP_ID,
+ shiny::div(
+ class = "auth-container",
+ shiny::br(),
+
+ shiny::div(
+ class = "auth-title text-center",
+ shiny::tags$img(src = "aphrc.png", height = "80px",
+ style = "margin-bottom: 15px;"),
+ shiny::tags$h3("Welcome to Nocode Platform")
+ ),
+
+ shiny::div(
+ class = "toggle-buttons",
+ shiny::actionButton("show_login", "Login", class = "btn btn-outline-success"),
+ shiny::actionButton("show_signup", "Sign Up", class = "btn btn-outline-success"),
+ shiny::actionButton("show_reset", "Reset Password", class = "btn btn-outline-success")
+ ),
+
+ shiny::div(
+ id = "login_form",
+ login::login_ui(id = app_login_config$APP_ID)
+ ),
+
+ shiny::div(
+ id = "signup_form", style = "display:none;",
+ login::new_user_ui(id = app_login_config$APP_ID)
+ ),
+
+ shiny::div(
+ id = "reset_form", style = "display:none;",
+ login::reset_password_ui(id = app_login_config$APP_ID)
)
- ),
-
- login::is_logged_in(
- id = app_login_config$APP_ID,
- div(dashboardPage(aphrcHeader, aphrcSiderbar, aphrcBody, skin = "green"))
- ),
-
- login::is_logged_in(
- id = app_login_config$APP_ID,
- div(footer)
)
- ))
-
-
+ ),
+
+ # Logged-in dashboard
+ login::is_logged_in(
+ id = app_login_config$APP_ID,
+ shinydashboard::dashboardPage(
+ header = aphrcHeader,
+ sidebar = aphrcSiderbar,
+ body = aphrcBody,
+ skin = "green"
+ )
+ ),
+
+ # Logged-in footer
+ login::is_logged_in(
+ id = app_login_config$APP_ID,
+ footer
+ )
+ )
+)
diff --git a/ui/anonymization_ui.R b/ui/anonymization_ui.R
new file mode 100644
index 0000000..14f16b4
--- /dev/null
+++ b/ui/anonymization_ui.R
@@ -0,0 +1,14 @@
+# Builds the dashboard tab item for quantitative anonymization.
+# The actual UI lives in the "quant_anon" Shiny module.
+anonymization_quant_ui <- function() {
+  shinydashboard::tabItem(tabName = "anonymization_quant",
+                          mod_quant_anon_ui("quant_anon"))
+}
+
+# Placeholder dashboard tab item for qualitative anonymization.
+anonymization_qual_ui <- function() {
+  placeholder <- shiny::tagList(shiny::h3("Qualitative Anonymization"),
+                                shiny::p("Coming soon / integrate later."))
+  shinydashboard::tabItem(tabName = "anonymization_qual",
+                          placeholder)
+}
diff --git a/ui/dashboard_body.R b/ui/dashboard_body.R
index a38606d..0010713 100644
--- a/ui/dashboard_body.R
+++ b/ui/dashboard_body.R
@@ -22,125 +22,130 @@ source("ui/add_resources_ui.R")
source("ui/deploy_model_ui.R", local=TRUE)
source("ui/predict_pycaret_ui.R", local = TRUE)
+
#### ---- Change language --------------------------------------------
source("server/change_language.R", local = TRUE)
#### Extracts language specific labels
get_rv_labels = function(var) {
- get_rv_labels_base(rv_lang$labelling_file_df, var)
+ get_rv_labels_base(rv_lang$labelling_file_df, var)
}
-
aphrcBody <- dashboardBody(
- headertag,
- useShinyjs(),
- useAttendant(),
- # useWaiter(), #FIXME: Use better one
- theme = appTheme,
- # -- Handler JS pour capter les clics 'Deploy' dans la DataTable du module --
- # dashboard_body (une seule fois, avant tabItems)
- tags$head(
- tags$script(HTML("
- Shiny.addCustomMessageHandler('bindDeployBtn', function(msg) {
- var ns = msg.ns;
-
- // Nettoie d'abord les handlers (évite doublons)
- $(document).off('click', '.action-deploy');
- $(document).off('click', '.action-stop');
-
- // ---- Deploy ----
- $(document).on('click', '.action-deploy', function(){
- var $btn = $(this);
- var mid = $btn.data('model');
-
- // Feedback visuel immédiat
- $btn.prop('disabled', true).text('Deploying...');
-
- // Envoi à Shiny
- Shiny.setInputValue(ns + 'deploy_model_id', mid, {priority: 'event'});
- });
-
- // ---- Stop ----
- $(document).on('click', '.action-stop', function(){
- var $btn = $(this);
- var mid = $btn.data('model');
-
- $btn.prop('disabled', true).text('Stopping...');
-
- Shiny.setInputValue(ns + 'stop_model_id', mid, {priority: 'event'});
- });
- });
- Shiny.addCustomMessageHandler('openSwagger', function(msg) {
- if (msg && msg.url) { window.open(msg.url, '_blank'); }
- });
- "))
- ),
-
- tabItems(
- tabItem(tabName = "homePage"
- , class = "active"
- , homepage()
- )
-
- ## Source data
- , sourcedata_ui()
-
- ## Data overview
- , overview_ui()
-
- ## Explore
- , explore_ui()
-
- ## Transform data
- , transform_ui()
-
- ## Combine data
- , combinedata_ui()
-
- ## Summarize data automatic
- , summarize_automatic_ui()
-
- ## Summarize data customize
- , summarize_custom_ui()
-
- ## Research questions
- , research_questions_ui()
-
- ## Setup models
- , setup_models_ui()
-
- ## Feature engineering
- , feature_engineering_ui()
-
- ## Evidence quality
- , evidence_quality_ui()
-
- ## Train models: FIXME: NOW - Remove text in the train_model_ui
- , train_all_model_ui()
-
- ## Validate and deploy models
- , validate_deploy_model_ui()
-
- ## Prediction UI
- , predict_classify_ui()
-
- ## Deep learning UI
- , deeplearning_ui()
-
- ## Cohort constructor
- , cohort_constructor_ui()
-
- ## Achilles
- , achilles_ui()
-
- ## Feature extraction
- , feature_extraction_ui()
-
- ## OMOP visualization
- , omop_visualizations_ui()
-
- ## OMOP resources
- , add_resources_ui()
-
- )
+ headertag,
+ useShinyjs(),
+ useAttendant(),
+ # useWaiter(), #FIXME: Use better one
+ theme = appTheme,
+
+ # -- JS handler to catch 'Deploy' clicks inside the module's DataTable --
+ # dashboard_body (registered once, before tabItems)
+ tags$head(
+ tags$script(HTML("
+ Shiny.addCustomMessageHandler('bindDeployBtn', function(msg) {
+ var ns = msg.ns;
+
+ // Nettoie d'abord les handlers (évite doublons)
+ $(document).off('click', '.action-deploy');
+ $(document).off('click', '.action-stop');
+
+ // ---- Deploy ----
+ $(document).on('click', '.action-deploy', function(){
+ var $btn = $(this);
+ var mid = $btn.data('model');
+
+ // Feedback visuel immédiat
+ $btn.prop('disabled', true).text('Deploying...');
+
+ // Envoi à Shiny
+ Shiny.setInputValue(ns + 'deploy_model_id', mid, {priority: 'event'});
+ });
+
+ // ---- Stop ----
+ $(document).on('click', '.action-stop', function(){
+ var $btn = $(this);
+ var mid = $btn.data('model');
+
+ $btn.prop('disabled', true).text('Stopping...');
+
+ Shiny.setInputValue(ns + 'stop_model_id', mid, {priority: 'event'});
+ });
+ });
+
+ Shiny.addCustomMessageHandler('openSwagger', function(msg) {
+ if (msg && msg.url) { window.open(msg.url, '_blank'); }
+ });
+ "))
+ ),
+
+ tabItems(
+ tabItem(tabName = "homePage",
+ class = "active",
+ homepage()
+ )
+
+ ## Source data
+ , sourcedata_ui()
+
+ ## Data overview
+ , overview_ui()
+
+ ## Explore
+ , explore_ui()
+
+ ## Transform data
+ , transform_ui()
+
+ ## Combine data
+ , combinedata_ui()
+
+ ## Summarize data automatic
+ , summarize_automatic_ui()
+
+ ## Summarize data customize
+ , summarize_custom_ui()
+
+ ## Research questions
+ , research_questions_ui()
+
+ ## Setup models
+ , setup_models_ui()
+
+ ## Feature engineering
+ , feature_engineering_ui()
+
+ ## Evidence quality
+ , evidence_quality_ui()
+
+ ## Train models: FIXME: NOW - Remove text in the train_model_ui
+ , train_all_model_ui()
+
+ ## Validate and deploy models
+ , validate_deploy_model_ui()
+
+ ## Prediction UI
+ , predict_classify_ui()
+
+ ## Deep learning UI
+ , deeplearning_ui()
+
+ ## Cohort constructor
+ , cohort_constructor_ui()
+
+ ## Achilles
+ , achilles_ui()
+
+ ## Feature extraction
+ , feature_extraction_ui()
+
+ ## OMOP visualization
+ , omop_visualizations_ui()
+
+ ## OMOP resources
+ , add_resources_ui()
+
+ ## Anonymization
+ , anonymization_quant_ui()
+ , anonymization_qual_ui()
+ )
)
diff --git a/ui/load_r_packages.R b/ui/load_r_packages.R
index d45c665..377b1fe 100644
--- a/ui/load_r_packages.R
+++ b/ui/load_r_packages.R
@@ -92,7 +92,25 @@ libraries <- c(
"MLmetrics",
"fs"
)
-
+library(shiny)
+library(shinyjs)
+library(sortable)
+library(shinyWidgets)
+library(flexdashboard)
+library(shinyAce)
+library(leaflet)
+library(dplyr)
+library(readr)
+library(readxl)
+library(openxlsx)
+library(haven)
+# NOTE: shiny and shinyjs are already attached above (duplicate calls removed);
+# these packages should also be appended to `libraries` so the auto-install check below covers them.
+library(digest)
+library(sdcMicro)
+library(rmarkdown)
+library(pagedown)
+library(data.table)
# Install missing CRAN packages
missing <- setdiff(libraries, rownames(installed.packages()))
diff --git a/ui/sidebar_hover_collapse.R b/ui/sidebar_hover_collapse.R
new file mode 100644
index 0000000..0969ba5
--- /dev/null
+++ b/ui/sidebar_hover_collapse.R
@@ -0,0 +1,175 @@
+# ui/sidebar_hover_collapse.R
+
+sidebar_hover_collapse_assets <- function(
+ collapsed_width = 56, # px
+ expanded_width = 240, # px
+ transition_ms = 320, # smooth
+ default_topbar_px = 110 # <- IMPORTANT: fallback if JS can't detect header
+) {
+ kill_js <- shiny::tags$script(shiny::HTML("
+(function () {
+
+ function killAdminLTEHeaderAndToggle() {
+ // Remove the hamburger toggle wherever it exists
+ document.querySelectorAll('.sidebar-toggle, a.sidebar-toggle').forEach(function(el){
+ if (el && el.parentNode) el.parentNode.removeChild(el);
+ });
+
+ // Remove the AdminLTE header bar container if it exists
+ var mh = document.querySelector('.main-header');
+ if (mh && mh.parentNode) mh.parentNode.removeChild(mh);
+ }
+
+ // Run now
+ killAdminLTEHeaderAndToggle();
+
+ // Run again after Shiny attaches UI
+ document.addEventListener('shiny:connected', function(){
+ setTimeout(killAdminLTEHeaderAndToggle, 50);
+ setTimeout(killAdminLTEHeaderAndToggle, 250);
+ setTimeout(killAdminLTEHeaderAndToggle, 800);
+ });
+
+ // Observe DOM changes (login / dynamic UI)
+ var obs = new MutationObserver(function(){ killAdminLTEHeaderAndToggle(); });
+ if (document.body) obs.observe(document.body, { childList: true, subtree: true });
+
+})();
+"))
+ # kill_js is returned in the tagList below; a bare expression here would be silently discarded by R.
+ css <- sprintf("
+ :root{
+ /* Fallback so sidebar never goes behind the green header */
+ --aphrc-topbar-height: %dpx;
+ }
+
+ /* Sidebar position */
+ .main-sidebar{
+ position: fixed !important;
+ left: 0 !important;
+ top: var(--aphrc-topbar-height) !important;
+ height: calc(100vh - var(--aphrc-topbar-height)) !important;
+
+ overflow-x: hidden;
+ overflow-y: auto;
+
+ width: %dpx !important;
+ transition: width %dms cubic-bezier(0.22, 1, 0.36, 1);
+ will-change: width;
+ z-index: 2000;
+ }
+
+ /* Fix weird top spacing inside sidebar (THIS fixes the 'home icon too high') */
+ .main-sidebar .sidebar{
+ padding-top: 10px !important;
+ }
+ .main-sidebar .sidebar-menu{
+ margin-top: 0 !important;
+ }
+
+ /* Make menu items consistent height + vertically centered */
+ .main-sidebar .sidebar-menu > li > a{
+ height: 44px !important;
+ display: flex !important;
+ align-items: center !important;
+ box-sizing: border-box;
+ }
+
+ /* Keep content aligned to collapsed sidebar (no jump) */
+ .content-wrapper, .right-side, .main-footer{
+ margin-left: %dpx !important;
+ transition: margin-left %dms cubic-bezier(0.22, 1, 0.36, 1);
+ }
+
+ /* Expand on hover */
+ .main-sidebar:hover{
+ width: %dpx !important;
+ }
+
+ /* Hide text when collapsed */
+ .main-sidebar .sidebar-menu > li > a > span,
+ .main-sidebar .sidebar-menu > li > a > .pull-right-container{
+ display: none !important;
+ }
+
+ /* Collapsed: icon centered */
+ .main-sidebar .sidebar-menu > li > a{
+ justify-content: center !important;
+ padding-left: 0 !important;
+ padding-right: 0 !important;
+ }
+ .main-sidebar .sidebar-menu > li > a > i{
+ font-size: 16px;
+ margin: 0 !important;
+ width: auto !important;
+ }
+
+ /* On hover: show text and align left */
+ .main-sidebar:hover .sidebar-menu > li > a{
+ justify-content: flex-start !important;
+ padding-left: 15px !important;
+ padding-right: 15px !important;
+ }
+ .main-sidebar:hover .sidebar-menu > li > a > i{
+ margin-right: 8px !important;
+ }
+ .main-sidebar:hover .sidebar-menu > li > a > span,
+ .main-sidebar:hover .sidebar-menu > li > a > .pull-right-container{
+ display: inline-block !important;
+ }
+
+ /* Keep shinydashboard header disabled (do NOT force-hide main-header aggressively) */
+ .content-wrapper{ padding-top: 0 !important; }
+ ",
+ default_topbar_px,
+ collapsed_width, transition_ms,
+ collapsed_width, transition_ms,
+ expanded_width
+ )
+ shiny::tagList( # return ALL assets; only the tagList (last expression) reaches the page
+ kill_js, # header/toggle removal script (was previously discarded, never rendered)
+ shiny::tags$style(shiny::HTML(css)),
+ shiny::tags$script(shiny::HTML("
+ (function () {
+
+ function pickHeader() {
+ // Try common candidates; we want the GREEN header region container
+ return document.querySelector('#auth_wrapper1 .header')
+ || document.querySelector('.header')
+ || document.querySelector('#auth_wrapper1 header')
+ || document.querySelector('header');
+ }
+
+ function setTopbarHeight() {
+ var el = pickHeader();
+ if (!el) return;
+
+ // Use bottom edge: exactly where green block ends
+ var r = el.getBoundingClientRect();
+ var h = Math.max(0, Math.round(r.bottom));
+ if (h > 20) {
+ document.documentElement.style.setProperty('--aphrc-topbar-height', h + 'px');
+ }
+ }
+
+ // MutationObserver: catches Shiny/login UI changes reliably
+ var obs = new MutationObserver(function(){ setTopbarHeight(); });
+
+ function startObserver() {
+ if (!document.body) return;
+ obs.observe(document.body, { childList: true, subtree: true });
+ setTopbarHeight();
+ }
+
+ if (document.readyState === 'loading') {
+ document.addEventListener('DOMContentLoaded', startObserver);
+ } else {
+ startObserver();
+ }
+
+ window.addEventListener('resize', setTopbarHeight);
+
+ })();
+ "))
+ )
+}
diff --git a/users_db/users.sqlite b/users_db/users.sqlite
index 3019f38..68509dc 100644
Binary files a/users_db/users.sqlite and b/users_db/users.sqlite differ