From 0e5153b4aa4e73a514f9cc0f73efa56a006b632a Mon Sep 17 00:00:00 2001 From: officialid130-13e13 Date: Sat, 4 Apr 2026 14:02:44 +0530 Subject: [PATCH 1/6] Service agent API changes --- .gitignore | 20 ++ app/api/tasks_api.rb | 14 ++ app/controllers/task_downloads_controller.rb | 193 +++++++++++++++++- app/services/effort_prediction_service.rb | 78 +++++++ config/routes.rb | 2 + ml_services/dummy_model.py | 16 ++ ml_services/effort_model.pth | Bin 0 -> 1997 bytes .../handlers/effort_regression_handler.py | 50 +++++ ml_services/key_file.json | 13 ++ ml_services/model_store/effort-predictor.mar | Bin 0 -> 2159 bytes ml_services/models/effort_model.py | 16 ++ ml_services/scripts/build_mar.sh | 9 + 12 files changed, 410 insertions(+), 1 deletion(-) create mode 100644 app/services/effort_prediction_service.rb create mode 100644 ml_services/dummy_model.py create mode 100644 ml_services/effort_model.pth create mode 100644 ml_services/handlers/effort_regression_handler.py create mode 100644 ml_services/key_file.json create mode 100644 ml_services/model_store/effort-predictor.mar create mode 100644 ml_services/models/effort_model.py create mode 100644 ml_services/scripts/build_mar.sh diff --git a/.gitignore b/.gitignore index 977065f4e4..2a6a9052c0 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,23 @@ _history # Institution specific config config/*_setting.rb !config/no_institution_setting.rb +ml_services/logs/access_log.24-Mar.log.gz +ml_services/logs/access_log.log +ml_services/logs/model_log.24-Mar.log.gz +ml_services/logs/model_log.log +ml_services/logs/model_metrics.log +ml_services/logs/ts_log.24-Mar.log.gz +ml_services/logs/ts_log.log +ml_services/logs/ts_metrics.24-Mar.log.gz +ml_services/logs/ts_metrics.log +ml_services/logs/config/20260324175502069-startup.cfg +ml_services/logs/config/20260324184851096-shutdown.cfg +ml_services/logs/config/20260324184854550-startup.cfg +ml_services/logs/config/20260325144256690-shutdown.cfg +ml_services/logs/config/20260325144319876-startup.cfg +ml_services/logs/config/20260325144545311-shutdown.cfg +ml_services/logs/config/20260325144552586-startup.cfg +ml_services/logs/config/20260325144822051-shutdown.cfg +ml_services/logs/config/20260325144903054-startup.cfg +ml_services/logs/config/20260325163409679-shutdown.cfg +ml_services/logs/config/20260325172728867-startup.cfg diff --git a/app/api/tasks_api.rb b/app/api/tasks_api.rb index a810354367..392705d96a 100644 --- a/app/api/tasks_api.rb +++ b/app/api/tasks_api.rb @@ -519,4 +519,18 @@ class TasksApi < Grape::API true end + + # effort prediction endpoint for task.. 
+ desc 'Predict effort for a task' + params do + requires :features, type: Array[Float], desc: 'Feature values' + end + post :predict_effort do + features = params[:features] + prediction = EffortPredictionService.predict(features) + { predicted_effort: prediction } + end + + + end diff --git a/app/controllers/task_downloads_controller.rb b/app/controllers/task_downloads_controller.rb index 2a0c8d0763..f0bfe4c200 100644 --- a/app/controllers/task_downloads_controller.rb +++ b/app/controllers/task_downloads_controller.rb @@ -49,4 +49,195 @@ def index rescue MyException => e render json: e.message, status: e.status end -end + + #prediction effort function + + protect_from_forgery with: :null_session # allow API POST without CSRF token + #skip_before_action :verify_authenticity_token, only: [:predict_effort] + + # def predict_effort + # features = params[:features] # expects an array of numbers + # effort = EffortPredictionService.predict(features) + + # if effort + # render json: { predicted_effort: effort } + # else + # render json: { error: "Prediction failed" }, status: :unprocessable_entity + # end + # end + + + # def predict_effort + # features = params[:features] + + # # Call TorchServe + # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") + # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) + + # # Parse TorchServe output + # prediction = JSON.parse(response.body) + + # # Handle array vs single value + # prediction_value = prediction.is_a?(Array) ? prediction.first : prediction + + # # Return clean JSON + # render json: { predicted_effort: prediction_value } + # end + + + # def predict_effort + # features = params[:features] + + # # Call TorchServe + # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") + # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) + + # # Debug log raw response (optional) + # Rails.logger.info("TorchServe raw response: #{response.body}") + + # # Parse TorchServe output + # prediction = JSON.parse(response.body) rescue response.body + + # # Handle array vs single value + # prediction_value = + # if prediction.is_a?(Array) + # prediction.first + # elsif prediction.is_a?(Hash) && prediction["prediction"] + # prediction["prediction"] + # else + # prediction + # end + + # # Return clean JSON + # render json: { predicted_effort: prediction_value } + # end + + + # def predict_effort + # features = params[:features] + + # # Call TorchServe + # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") + # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) + + # # Debug log raw response + # Rails.logger.info("TorchServe raw response: #{response.body}") + + # # Parse TorchServe output safely + # prediction = begin + # JSON.parse(response.body) + # rescue JSON::ParserError + # response.body + # end + + # # Normalize output + # prediction_value = + # if prediction.is_a?(Array) + # prediction.first + # elsif prediction.is_a?(Hash) && prediction["prediction"] + # prediction["prediction"] + # else + # prediction + # end + + # # Return clean JSON + # render json: { predicted_effort: prediction_value } + # end + + # def predict_effort + # features = params[:features] + + # # Call TorchServe + # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") + # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) + + # # Parse TorchServe output safely + # 
prediction = begin + # JSON.parse(response.body) + # rescue JSON::ParserError + # response.body + # end + + # # Normalize output + # prediction_value = + # if prediction.is_a?(Array) + # prediction.first + # elsif prediction.is_a?(Hash) && prediction["prediction"] + # prediction["prediction"] + # else + # prediction + # end + + # # Return clean JSON + # render json: { predicted_effort: prediction_value } + # end + + + + # this one was working +# def predict_effort +# features = params[:features] + +# # Call TorchServe +# uri = URI("http://effort-predictor:8080/predictions/effort-predictor") +# response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) + +# # Parse TorchServe output safely +# prediction = JSON.parse(response.body) rescue response.body + +# # Normalize output +# prediction_value = +# case prediction +# when Array +# prediction.first +# when Hash +# prediction["prediction"] || prediction.values.first +# else +# prediction +# end + +# # Force JSON response +# render json: { predicted_effort: prediction_value } +# end +# end + + + +# POST /tasks/predict_effort + def predict_effort + features = params[:features] + + if features.blank? + render json: { error: "Features parameter is required" }, status: :bad_request + return + end + + uri = URI("http://localhost:8080/predictions/effort-predictor") + headers = { + "Content-Type" => "application/json", + "Authorization" => "Bearer #{ENV['TORCHSERVE_INFERENCE_KEY']}" + } + body = { features: features }.to_json + + response = Net::HTTP.post(uri, body, headers) + Rails.logger.info("TorchServe raw response: #{response.body}") + + if response.is_a?(Net::HTTPSuccess) + prediction = JSON.parse(response.body) rescue response.body + prediction_value = + case prediction + when Array + prediction.first + when Hash + prediction["predicted_effort"] || prediction.values.first + else + prediction + end + + render json: { predicted_effort: prediction_value } + else + Rails.logger.error("TorchServe error: #{response.code} #{response.body}") + render json: { error: "Prediction failed" }, status: :internal_server_error + end + end +end \ No newline at end of file diff --git a/app/services/effort_prediction_service.rb b/app/services/effort_prediction_service.rb new file mode 100644 index 0000000000..3cac72218d --- /dev/null +++ b/app/services/effort_prediction_service.rb @@ -0,0 +1,78 @@ +# # app/services/effort_prediction_service.rb +# require 'net/http' +# require 'json' + +# class EffortPredictionService +# TORCHSERVE_URL = "http://localhost:8080/predictions/effort-predictor" + +# def self.predict(features) +# uri = URI(TORCHSERVE_URL) +# headers = { "Content-Type" => "application/json" } +# #headers = { +# # "Content-Type" => "application/json", +# # "Authorization" => "Bearer #{ENV['TORCHSERVE_INFERENCE_KEY']}" +# #} + +# body = { features: features }.to_json + +# response = Net::HTTP.post(uri, body, headers) +# parsed = JSON.parse(response.body) + +# parsed["predicted_effort"] +# rescue => e +# Rails.logger.error("EffortPredictionService error: #{e.message}") +# nil +# end +# end + + +# require 'net/http' +# require 'json' + +# class EffortPredictionService +# TORCHSERVE_URL = "http://127.0.0.1:8080/predictions/effort-predictor" + +# def self.predict(features) +# uri = URI(TORCHSERVE_URL) +# headers = { +# "Content-Type" => "application/json", +# "Authorization" => "Bearer #{ENV['TORCHSERVE_INFERENCE_KEY']}" +# } +# body = { features: features }.to_json + +# response = Net::HTTP.post(uri, body, headers) + +# if 
response.is_a?(Net::HTTPSuccess) +# JSON.parse(response.body)["predicted_effort"] +# else +# nil +# end +# end +# end + + +require 'net/http' +require 'json' + +class EffortPredictionService + TORCHSERVE_URL = ENV.fetch("TORCHSERVE_URL", "http://effort-predictor:8080/predictions/effort-predictor") + + def self.predict(features) + uri = URI(TORCHSERVE_URL) + headers = { + "Content-Type" => "application/json", + "Authorization" => "Bearer #{ENV['TORCHSERVE_INFERENCE_KEY']}" + } + body = { features: features }.to_json + + response = Net::HTTP.post(uri, body, headers) + + if response.is_a?(Net::HTTPSuccess) + parsed = JSON.parse(response.body) rescue response.body + parsed.is_a?(Hash) ? parsed["predicted_effort"] || parsed.values.first : parsed + else + Rails.logger.error("TorchServe error: #{response.code} #{response.body}") + nil + end + end +end diff --git a/config/routes.rb b/config/routes.rb index ea52a79000..3fb78e606e 100644 --- a/config/routes.rb +++ b/config/routes.rb @@ -5,6 +5,8 @@ get 'api/submission/unit/:id/task_definitions/:task_def_id/download_submissions', to: 'task_downloads#index' get 'api/submission/unit/:id/task_definitions/:task_def_id/student_pdfs', to: 'task_submission_pdfs#index' get 'api/units/:id/all_resources', to: 'lecture_resource_downloads#index' + post 'tasks/predict_effort', to: 'task_downloads#predict_effort' + mount ApiRoot => '/' mount GrapeSwaggerRails::Engine => '/api/docs' diff --git a/ml_services/dummy_model.py b/ml_services/dummy_model.py new file mode 100644 index 0000000000..d505bdd4ab --- /dev/null +++ b/ml_services/dummy_model.py @@ -0,0 +1,16 @@ +import torch +import torch.nn as nn + +class EffortPredictor(nn.Module): + def __init__(self, input_dim): + super(EffortPredictor, self).__init__() + self.fc = nn.Linear(input_dim, 1) + + def forward(self, x): + return self.fc(x) + +# Create a dummy model with 10 input features +model = EffortPredictor(input_dim=10) + +# Save its state dict as effort_model.pth +torch.save(model.state_dict(), "effort_model.pth") diff --git a/ml_services/effort_model.pth b/ml_services/effort_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..be08bddf8afd4cb5014d9d4a61f5730eb3e2bc6b GIT binary patch literal 1997 zcmbVN&ubGw6rMC`lCD-;lcrXyJ=8;rHr=H8S=tIg_Oe1_MF?J&&1REby2*BSCoL@$ zL}@{9_8@{Ncn}dh3KqeL2mb&M9z;)i@*u@aFJ7G4{OHa$QQHYG1H1FS_sx6Xdz<6k z9svA)a9|CAejt}hx`Bj>UX-<|qKL%E@{-17`@qfCy452|x~9nzQuV6IEEq-Ekc(GT z2{8~%EJ9Z}--n?~Qe;(D%L;-$64DJxi3l~MY9=EXa-pVbMFGiGQ#XW_7z5qmaNdWV zOng!-%h1zsVK_|}(e=hd2KMG%_zVlZc{kpr$AUEC3wYfM={2E3#qYl)$sR%gp!SS0xFKZoqR2c~CP8ksR;y@AMBm zp2623mg`!HQ7}taB7&<@5s}c0!DZ9?1!)!LKldo1@p|{`9g<99-D!-0n1e>MK!T{L zrNm9W@rM1H6-gqaa=Ze$hfWaXevUQOgL=LeIN1S z_{6$%2!IUDfoNcg<+6#R5%@MSSAX~7!R*f7`us}i)7xjB`rC(ZF4PxpU#x3mzh)O| zrTU}KPv;54y$|o#!k8VgzcC!{z%a^X1DzP|zI-)Du)p;GevStLAl4l_=&V3nfk(~% zG()0)d{Vyg@A{8}N&03HEBU5us91sPA{jw~TI8}5oo?EyIK8tho65vv(Refwi=|@e zcr3%R>Ev`Ol}@wKL?RiFX5yKQrP7~Q?{Dwt-$e+wC#SuWhsXu+S;TQl;BzrrJQAmTBG>vqihn-;dz6eoZ+fJ}{KRPk&$_-E$Aw0Y(LXCcM0@&qA z1MGAJz+;V0u3ce4ia`)do?_6{v=hNz{c#Fm+lkOwmpR_+z0`C(@1AT!w7$KqecBQh d0O{Vw5$s34hG5D1Z%yf5GN*ta|H%2q**^fN&ustz literal 0 HcmV?d00001 diff --git a/ml_services/handlers/effort_regression_handler.py b/ml_services/handlers/effort_regression_handler.py new file mode 100644 index 0000000000..865afc0942 --- /dev/null +++ b/ml_services/handlers/effort_regression_handler.py @@ -0,0 +1,50 @@ +# from ts.torch_handler.base_handler import BaseHandler +# import torch + +# class EffortRegressionHandler(BaseHandler): +# def postprocess(self, 
data): +# if isinstance(data, torch.Tensor): +# return {"predicted_effort": float(data.item())} +# return {"predicted_effort": data} + +# def handle(self, data, context): +# try: +# body = data[0].get("body") +# features = json.loads(body)["features"] +# tensor = torch.tensor(features).float().unsqueeze(0) +# output = self.model(tensor).item() +# return [json.dumps({"predicted_effort": output})] +# except Exception as e: +# return [json.dumps({"error": str(e)})] + +import json +import torch +from ts.torch_handler.base_handler import BaseHandler + +class EffortRegressionHandler(BaseHandler): + def postprocess(self, data): + if isinstance(data, torch.Tensor): + return {"predicted_effort": float(data.item())} + return {"predicted_effort": data} + + def handle(self, data, context): + try: + # Extract request body + body = data[0].get("body") + #features = json.loads(body)["features"] + features = body["features"] # no json.loads + + # Convert to tensor + tensor = torch.tensor(features).float().unsqueeze(0) + + # Run model + #output = self.model(tensor).item() + output = max(0.0, self.model(tensor).item()) # to have positive output + + + # Return JSON response + return [json.dumps({"predicted_effort": output})] + # Return a dict inside a list (TorchServe will JSON‑serialize it) + #return [{"predicted_effort": output}] #modify to validate output in swagger + except Exception as e: + return [json.dumps({"error": str(e)})] diff --git a/ml_services/key_file.json b/ml_services/key_file.json new file mode 100644 index 0000000000..3f4a68a626 --- /dev/null +++ b/ml_services/key_file.json @@ -0,0 +1,13 @@ +{ + "management": { + "key": "pIr-sQqC", + "expiration time": "2026-04-03T14:48:03.235441049Z" + }, + "inference": { + "key": "dwQSgtJ4", + "expiration time": "2026-04-03T14:48:03.235433924Z" + }, + "API": { + "key": "Eoh-utgR" + } +} \ No newline at end of file diff --git a/ml_services/model_store/effort-predictor.mar b/ml_services/model_store/effort-predictor.mar new file mode 100644 index 0000000000000000000000000000000000000000..5cf81964da50b9cf79d861abde235cd9779e12c4 GIT binary patch literal 2159 zcmZ`)c{JNu8~%kLL~RrMu0aTjHfkNUQ?;w56~Q#Nk|3&<646q6jM&#&8x3QrU~2DB zTeU@XA!CaQMi(8ey;{C}GxMFEI`iH8$9wL%=eg&-&-=d5<7mqch5-No0Tl1VcxkN* z-`iuIo`C>>pB1tG{`iPUOep>mHbf&V2JL8z>z5OlBiwT81gV&g>Nw}m{pyTMwDhm3 z)%4Ij^=1v4@s3h7cWpkPtb8R`TzA7ngPNY^kk$CmBh$XMzTu8$T7tf`_bWl~3cL{~2BJ}B)tm89LhON->xJmpJbjmZ zhg)&0^=I)4Zxa|I)5pSs6Slg@H#~8Rak>baXGpH-)2J)a=+T|W1E^tRjhN61zFoME zOEYXKw(FGLTfIyo!*pL{&0XEde%A(d{dfqQ>j$_#y!Ec8c9FdTeq^V1cEXgHGkKD1 zP%V@BlIEcH^2f)`l~C2#_O)HjwQumKTR0wD;G!vy-`E`?wtSd>+} z^xj@d&qQi3M@|S@*LE%-ybk!XWxn zB3-|0_(^(QemY-WisdF>?M|a#t2zA?!AVk^%i1IK_=OPlY?|<+Fhhs)TMkXDU%*oN zToT>F#3%D+b8g@*c5}~V=Tnja2&D;i6Xm>V?0a${sRgP1rq-b7ZAZ;*Mf!6S!My^n z**kTZfr3=D`iHF_GS$rLUBzoa{R)8iF0}NOTDzsVPw@GSo6GS-6?D!aY9pmVs`4z= z0BPj$#R&KU>Uae0Bz-0o%HXOD-1{x>N(D3SF!>s)T4PsZbMa9~#kpE1`)XN>d|R*uZiczj8P}=sm>mA z8wf8N8ZV?u5fiWaBSmAih~?CF1STM~Og_4)I-e-?o32nnclcN4c8V;2T!7%b_8do! 
zp*EvvIcJ1S-IM*kH+nvH$*{8nz$h002z={}e}QmD7Tv8YH1n!ZszI={3w#L4cY^Q^=mRgOo-9869!^BBBNOyuIP zQZ|aoe2o=7v0~?6KtcbpZTe|tc>h`%zDyR+Q$NLZMiVkU1VBJEXD8IatC!6+=W=5P zhDAkTN(?COQ1Yr_QibX*gq&x?yQSN1A}HE5I}riyKDr^?Z_u0FO>pr^l~HubgM*3&A&s{00+!DU!&&Um)@M+#?88GVTJu4c!Hvj)HB9 z_6(`1>G&in3i8hTfNYzVINxJ!^6807@!U{7(!xlLn08-EarV=9>$#WOv#(_*SG^8W z=@o)(7o}dIlN|!oHZ+s6VsxK*TzA^I-d>yyo_ChRAHL0=Gs&9+4jbdsWTgWOD+4{dW#jb z^fm*=hu4q=rjPh>x!2jW0=W$LrQjZ=Nx>Yltdos?8iyA;#W+0O5Y1uUm+NvSh$Bt4 zQCF$_M}II(T;5_pVlp6sckkGVgGPyma#QodKfrPvq#b&9Xhkdg?&P~|QSJc+s^x8% z-QpQC106gz*E8#Cncy&vQXN&sl#Tl(cgY&p5AYtNnZfv?9Ia38g()W+++#fT*`9h< zUcf1^+u3~l18=M9di%$6IBmya{FgSq@k4~zRpE-gpw5~ROOD1*(a^;u?Ht*Cq87{o z8!-YdrMRS-D`nX!MZogx9Q?I^SF@lxzk!$9u0;6f9dsUq5sVMOA~Vl}{#+vIEKBb= zoqjl<+|Rec1zzn0wcBh*NrY#9Ho`eyo!Ek=_e z^w+XH9uSY6NsT#XTS(GM?BzAg_XA^fQ_!l8H7IHMh8+*$Wb|&^VmKH9q z8o>lSE_9s6^H5WOb777ODMH=N(%%G0T_LDfb`+l}zHu2A{HAM$BBH80|CK=uV`d0U z%RPd;T?w?PL6E;h!n|&$E#x8Ul3v6+??z5UW}XwP+QxZa^(K`TFb(UO=Qa4ICx0>o zgR#Y{TBrSAvP%SwO1>PV|6ETfNv?6Pt#%O!GZT=3#N)f`ev^mxKPDU=j$#O0=ahwz zVpM|mG9Ioo-3^@93rM!U^cirp1+l@{|G%ZOzWCRokp3Hv80=B-QK$JE3IG(65{s^X z!2jwyM_)RsOW$5HWvSKoX+#DdMISBF|Impb0?PycwPcRA9IPt<0JAoPm8=s>5CPyn D573hA literal 0 HcmV?d00001 diff --git a/ml_services/models/effort_model.py b/ml_services/models/effort_model.py new file mode 100644 index 0000000000..43baa3fb3b --- /dev/null +++ b/ml_services/models/effort_model.py @@ -0,0 +1,16 @@ +import torch +import torch.nn as nn + +class EffortPredictor(nn.Module): + def __init__(self, input_dim=10): # initialise with a default value + super(EffortPredictor, self).__init__() + self.fc = nn.Linear(input_dim, 1) + + def forward(self, x): + return self.fc(x) + +def get_model(): + model = EffortPredictor(input_dim=10) # adjust to your features + model.load_state_dict(torch.load("effort_model.pth")) + model.eval() + return model diff --git a/ml_services/scripts/build_mar.sh b/ml_services/scripts/build_mar.sh new file mode 100644 index 0000000000..fdbe265ead --- /dev/null +++ b/ml_services/scripts/build_mar.sh @@ -0,0 +1,9 @@ +#!/bin/bash +torch-model-archiver \ + --model-name effort-predictor \ + --version 1.0 \ + --model-file models/effort_model.py \ + --serialized-file effort_model.pth \ + --handler handlers/effort_regression_handler.py \ + --export-path model_store \ + --force From e217d68a2ae64cac964f752036671e8054d72373 Mon Sep 17 00:00:00 2001 From: officialid130-13e13 Date: Sat, 4 Apr 2026 15:24:50 +0530 Subject: [PATCH 2/6] Fix Rubocop style offenses in API code --- Gemfile.lock | 4 ++ app/api/tasks_api.rb | 21 +++--- app/controllers/task_downloads_controller.rb | 75 ++++++++++---------- app/services/effort_prediction_service.rb | 10 +-- config/routes.rb | 1 - 5 files changed, 55 insertions(+), 56 deletions(-) diff --git a/Gemfile.lock b/Gemfile.lock index 6bb903f2a6..0a935f1e2a 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -174,6 +174,7 @@ GEM faraday-net_http (3.4.0) net-http (>= 0.5.0) ffi (1.17.1-aarch64-linux-gnu) + ffi (1.17.1-arm64-darwin) ffi (1.17.1-x86_64-linux-gnu) fugit (1.11.1) et-orbi (~> 1, >= 1.2.11) @@ -281,6 +282,8 @@ GEM nio4r (2.7.4) nokogiri (1.18.7-aarch64-linux-gnu) racc (~> 1.4) + nokogiri (1.18.7-arm64-darwin) + racc (~> 1.4) nokogiri (1.18.7-x86_64-linux-gnu) racc (~> 1.4) numerizer (0.1.1) @@ -559,6 +562,7 @@ GEM PLATFORMS aarch64-linux + arm64-darwin-23 x86_64-linux DEPENDENCIES diff --git a/app/api/tasks_api.rb b/app/api/tasks_api.rb index 392705d96a..fec32c49bf 100644 --- a/app/api/tasks_api.rb +++ b/app/api/tasks_api.rb @@ -519,18 +519,15 @@ class TasksApi < 
Grape::API true end - - # effort prediction endpoint for task.. + # effort prediction endpoint for task.. desc 'Predict effort for a task' - params do - requires :features, type: Array[Float], desc: 'Feature values' - end - post :predict_effort do - features = params[:features] - prediction = EffortPredictionService.predict(features) - { predicted_effort: prediction } - end - - + params do + requires :features, type: Array[Float], desc: 'Feature values' + end + post :predict_effort do + features = params[:features] + prediction = EffortPredictionService.predict(features) + { predicted_effort: prediction } + end end diff --git a/app/controllers/task_downloads_controller.rb b/app/controllers/task_downloads_controller.rb index f0bfe4c200..be45bcf505 100644 --- a/app/controllers/task_downloads_controller.rb +++ b/app/controllers/task_downloads_controller.rb @@ -50,10 +50,10 @@ def index render json: e.message, status: e.status end - #prediction effort function + # prediction effort function - protect_from_forgery with: :null_session # allow API POST without CSRF token - #skip_before_action :verify_authenticity_token, only: [:predict_effort] + protect_from_forgery with: :null_session # allow API POST without CSRF token + # skip_before_action :verify_authenticity_token, only: [:predict_effort] # def predict_effort # features = params[:features] # expects an array of numbers @@ -66,7 +66,6 @@ def index # end # end - # def predict_effort # features = params[:features] @@ -84,7 +83,6 @@ def index # render json: { predicted_effort: prediction_value } # end - # def predict_effort # features = params[:features] @@ -112,7 +110,6 @@ def index # render json: { predicted_effort: prediction_value } # end - # def predict_effort # features = params[:features] @@ -172,38 +169,34 @@ def index # render json: { predicted_effort: prediction_value } # end - - # this one was working -# def predict_effort -# features = params[:features] - -# # Call TorchServe -# uri = URI("http://effort-predictor:8080/predictions/effort-predictor") -# response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) - -# # Parse TorchServe output safely -# prediction = JSON.parse(response.body) rescue response.body - -# # Normalize output -# prediction_value = -# case prediction -# when Array -# prediction.first -# when Hash -# prediction["prediction"] || prediction.values.first -# else -# prediction -# end - -# # Force JSON response -# render json: { predicted_effort: prediction_value } -# end -# end - - + # def predict_effort + # features = params[:features] + + # # Call TorchServe + # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") + # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) + + # # Parse TorchServe output safely + # prediction = JSON.parse(response.body) rescue response.body + + # # Normalize output + # prediction_value = + # case prediction + # when Array + # prediction.first + # when Hash + # prediction["prediction"] || prediction.values.first + # else + # prediction + # end + + # # Force JSON response + # render json: { predicted_effort: prediction_value } + # end + # end -# POST /tasks/predict_effort + # POST /tasks/predict_effort def predict_effort features = params[:features] @@ -215,7 +208,7 @@ def predict_effort uri = URI("http://localhost:8080/predictions/effort-predictor") headers = { "Content-Type" => "application/json", - "Authorization" => "Bearer #{ENV['TORCHSERVE_INFERENCE_KEY']}" + "Authorization" => "Bearer 
#{ENV.fetch('TORCHSERVE_INFERENCE_KEY', nil)}" } body = { features: features }.to_json @@ -223,7 +216,11 @@ def predict_effort Rails.logger.info("TorchServe raw response: #{response.body}") if response.is_a?(Net::HTTPSuccess) - prediction = JSON.parse(response.body) rescue response.body + prediction = begin + JSON.parse(response.body) + rescue StandardError + response.body + end prediction_value = case prediction when Array @@ -240,4 +237,4 @@ def predict_effort render json: { error: "Prediction failed" }, status: :internal_server_error end end -end \ No newline at end of file +end diff --git a/app/services/effort_prediction_service.rb b/app/services/effort_prediction_service.rb index 3cac72218d..b6e9650037 100644 --- a/app/services/effort_prediction_service.rb +++ b/app/services/effort_prediction_service.rb @@ -25,7 +25,6 @@ # end # end - # require 'net/http' # require 'json' @@ -50,7 +49,6 @@ # end # end - require 'net/http' require 'json' @@ -61,14 +59,18 @@ def self.predict(features) uri = URI(TORCHSERVE_URL) headers = { "Content-Type" => "application/json", - "Authorization" => "Bearer #{ENV['TORCHSERVE_INFERENCE_KEY']}" + "Authorization" => "Bearer #{ENV.fetch('TORCHSERVE_INFERENCE_KEY', nil)}" } body = { features: features }.to_json response = Net::HTTP.post(uri, body, headers) if response.is_a?(Net::HTTPSuccess) - parsed = JSON.parse(response.body) rescue response.body + parsed = begin + JSON.parse(response.body) + rescue StandardError + response.body + end parsed.is_a?(Hash) ? parsed["predicted_effort"] || parsed.values.first : parsed else Rails.logger.error("TorchServe error: #{response.code} #{response.body}") diff --git a/config/routes.rb b/config/routes.rb index 3fb78e606e..dc98fcaf84 100644 --- a/config/routes.rb +++ b/config/routes.rb @@ -7,7 +7,6 @@ get 'api/units/:id/all_resources', to: 'lecture_resource_downloads#index' post 'tasks/predict_effort', to: 'task_downloads#predict_effort' - mount ApiRoot => '/' mount GrapeSwaggerRails::Engine => '/api/docs' mount Sidekiq::Web => "/sidekiq" # mount Sidekiq::Web in your Rails app From 7fa276cf58306dee3983556d3ffca14489e52367 Mon Sep 17 00:00:00 2001 From: officialid130-13e13 Date: Tue, 14 Apr 2026 09:54:57 +0530 Subject: [PATCH 3/6] Removed unnecessary commented code --- app/controllers/task_downloads_controller.rb | 141 ------------------ app/services/effort_prediction_service.rb | 51 +------ .../handlers/effort_regression_handler.py | 25 +--- ml_services/key_file.json | 10 +- 4 files changed, 9 insertions(+), 218 deletions(-) diff --git a/app/controllers/task_downloads_controller.rb b/app/controllers/task_downloads_controller.rb index be45bcf505..51d9dd76ed 100644 --- a/app/controllers/task_downloads_controller.rb +++ b/app/controllers/task_downloads_controller.rb @@ -55,147 +55,6 @@ def index protect_from_forgery with: :null_session # allow API POST without CSRF token # skip_before_action :verify_authenticity_token, only: [:predict_effort] - # def predict_effort - # features = params[:features] # expects an array of numbers - # effort = EffortPredictionService.predict(features) - - # if effort - # render json: { predicted_effort: effort } - # else - # render json: { error: "Prediction failed" }, status: :unprocessable_entity - # end - # end - - # def predict_effort - # features = params[:features] - - # # Call TorchServe - # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") - # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) - - # # Parse TorchServe output - # 
prediction = JSON.parse(response.body) - - # # Handle array vs single value - # prediction_value = prediction.is_a?(Array) ? prediction.first : prediction - - # # Return clean JSON - # render json: { predicted_effort: prediction_value } - # end - - # def predict_effort - # features = params[:features] - - # # Call TorchServe - # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") - # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) - - # # Debug log raw response (optional) - # Rails.logger.info("TorchServe raw response: #{response.body}") - - # # Parse TorchServe output - # prediction = JSON.parse(response.body) rescue response.body - - # # Handle array vs single value - # prediction_value = - # if prediction.is_a?(Array) - # prediction.first - # elsif prediction.is_a?(Hash) && prediction["prediction"] - # prediction["prediction"] - # else - # prediction - # end - - # # Return clean JSON - # render json: { predicted_effort: prediction_value } - # end - - # def predict_effort - # features = params[:features] - - # # Call TorchServe - # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") - # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) - - # # Debug log raw response - # Rails.logger.info("TorchServe raw response: #{response.body}") - - # # Parse TorchServe output safely - # prediction = begin - # JSON.parse(response.body) - # rescue JSON::ParserError - # response.body - # end - - # # Normalize output - # prediction_value = - # if prediction.is_a?(Array) - # prediction.first - # elsif prediction.is_a?(Hash) && prediction["prediction"] - # prediction["prediction"] - # else - # prediction - # end - - # # Return clean JSON - # render json: { predicted_effort: prediction_value } - # end - - # def predict_effort - # features = params[:features] - - # # Call TorchServe - # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") - # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) - - # # Parse TorchServe output safely - # prediction = begin - # JSON.parse(response.body) - # rescue JSON::ParserError - # response.body - # end - - # # Normalize output - # prediction_value = - # if prediction.is_a?(Array) - # prediction.first - # elsif prediction.is_a?(Hash) && prediction["prediction"] - # prediction["prediction"] - # else - # prediction - # end - - # # Return clean JSON - # render json: { predicted_effort: prediction_value } - # end - - # this one was working - # def predict_effort - # features = params[:features] - - # # Call TorchServe - # uri = URI("http://effort-predictor:8080/predictions/effort-predictor") - # response = Net::HTTP.post(uri, features.to_json, { "Content-Type" => "application/json" }) - - # # Parse TorchServe output safely - # prediction = JSON.parse(response.body) rescue response.body - - # # Normalize output - # prediction_value = - # case prediction - # when Array - # prediction.first - # when Hash - # prediction["prediction"] || prediction.values.first - # else - # prediction - # end - - # # Force JSON response - # render json: { predicted_effort: prediction_value } - # end - # end - # POST /tasks/predict_effort def predict_effort features = params[:features] diff --git a/app/services/effort_prediction_service.rb b/app/services/effort_prediction_service.rb index b6e9650037..b2f0449f87 100644 --- a/app/services/effort_prediction_service.rb +++ b/app/services/effort_prediction_service.rb @@ -1,53 
+1,4 @@ -# # app/services/effort_prediction_service.rb -# require 'net/http' -# require 'json' - -# class EffortPredictionService -# TORCHSERVE_URL = "http://localhost:8080/predictions/effort-predictor" - -# def self.predict(features) -# uri = URI(TORCHSERVE_URL) -# headers = { "Content-Type" => "application/json" } -# #headers = { -# # "Content-Type" => "application/json", -# # "Authorization" => "Bearer #{ENV['TORCHSERVE_INFERENCE_KEY']}" -# #} - -# body = { features: features }.to_json - -# response = Net::HTTP.post(uri, body, headers) -# parsed = JSON.parse(response.body) - -# parsed["predicted_effort"] -# rescue => e -# Rails.logger.error("EffortPredictionService error: #{e.message}") -# nil -# end -# end - -# require 'net/http' -# require 'json' - -# class EffortPredictionService -# TORCHSERVE_URL = "http://127.0.0.1:8080/predictions/effort-predictor" - -# def self.predict(features) -# uri = URI(TORCHSERVE_URL) -# headers = { -# "Content-Type" => "application/json", -# "Authorization" => "Bearer #{ENV['TORCHSERVE_INFERENCE_KEY']}" -# } -# body = { features: features }.to_json - -# response = Net::HTTP.post(uri, body, headers) - -# if response.is_a?(Net::HTTPSuccess) -# JSON.parse(response.body)["predicted_effort"] -# else -# nil -# end -# end -# end +# app/services/effort_prediction_service.rb require 'net/http' require 'json' diff --git a/ml_services/handlers/effort_regression_handler.py b/ml_services/handlers/effort_regression_handler.py index 865afc0942..f53fb735e1 100644 --- a/ml_services/handlers/effort_regression_handler.py +++ b/ml_services/handlers/effort_regression_handler.py @@ -1,21 +1,3 @@ -# from ts.torch_handler.base_handler import BaseHandler -# import torch - -# class EffortRegressionHandler(BaseHandler): -# def postprocess(self, data): -# if isinstance(data, torch.Tensor): -# return {"predicted_effort": float(data.item())} -# return {"predicted_effort": data} - -# def handle(self, data, context): -# try: -# body = data[0].get("body") -# features = json.loads(body)["features"] -# tensor = torch.tensor(features).float().unsqueeze(0) -# output = self.model(tensor).item() -# return [json.dumps({"predicted_effort": output})] -# except Exception as e: -# return [json.dumps({"error": str(e)})] import json import torch @@ -31,20 +13,19 @@ def handle(self, data, context): try: # Extract request body body = data[0].get("body") - #features = json.loads(body)["features"] + features = body["features"] # no json.loads # Convert to tensor tensor = torch.tensor(features).float().unsqueeze(0) # Run model - #output = self.model(tensor).item() + output = max(0.0, self.model(tensor).item()) # to have positive output # Return JSON response return [json.dumps({"predicted_effort": output})] - # Return a dict inside a list (TorchServe will JSON‑serialize it) - #return [{"predicted_effort": output}] #modify to validate output in swagger + except Exception as e: return [json.dumps({"error": str(e)})] diff --git a/ml_services/key_file.json b/ml_services/key_file.json index 3f4a68a626..903a04b42b 100644 --- a/ml_services/key_file.json +++ b/ml_services/key_file.json @@ -1,13 +1,13 @@ { "management": { - "key": "pIr-sQqC", - "expiration time": "2026-04-03T14:48:03.235441049Z" + "key": "ZT_SKghL", + "expiration time": "2026-04-13T15:16:21.380749386Z" }, "inference": { - "key": "dwQSgtJ4", - "expiration time": "2026-04-03T14:48:03.235433924Z" + "key": "AJVoXRTf", + "expiration time": "2026-04-13T15:16:21.380728803Z" }, "API": { - "key": "Eoh-utgR" + "key": "hIvWRijq" } } \ No newline at end of 
file
From 55c4d8bb3c2cdda3c93d98a8c7810f84b3fe0a34 Mon Sep 17 00:00:00 2001
From: officialid130-13e13
Date: Wed, 22 Apr 2026 17:23:12 +0530
Subject: [PATCH 4/6] Change code as per reviewer suggestions to use delegation
 and remove duplication

---
 .gitignore                                   | 26 ++++-----------
 app/controllers/task_downloads_controller.rb | 34 +++++---------------
 app/services/effort_prediction_service.rb    | 14 ++++++++
 3 files changed, 28 insertions(+), 46 deletions(-)

diff --git a/.gitignore b/.gitignore
index 2a6a9052c0..591b14f571 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,23 +41,9 @@ _history
 # Institution specific config
 config/*_setting.rb
 !config/no_institution_setting.rb
-ml_services/logs/access_log.24-Mar.log.gz
-ml_services/logs/access_log.log
-ml_services/logs/model_log.24-Mar.log.gz
-ml_services/logs/model_log.log
-ml_services/logs/model_metrics.log
-ml_services/logs/ts_log.24-Mar.log.gz
-ml_services/logs/ts_log.log
-ml_services/logs/ts_metrics.24-Mar.log.gz
-ml_services/logs/ts_metrics.log
-ml_services/logs/config/20260324175502069-startup.cfg
-ml_services/logs/config/20260324184851096-shutdown.cfg
-ml_services/logs/config/20260324184854550-startup.cfg
-ml_services/logs/config/20260325144256690-shutdown.cfg
-ml_services/logs/config/20260325144319876-startup.cfg
-ml_services/logs/config/20260325144545311-shutdown.cfg
-ml_services/logs/config/20260325144552586-startup.cfg
-ml_services/logs/config/20260325144822051-shutdown.cfg
-ml_services/logs/config/20260325144903054-startup.cfg
-ml_services/logs/config/20260325163409679-shutdown.cfg
-ml_services/logs/config/20260325172728867-startup.cfg
+
+# Ignore TorchServe logs and config
+ml_services/logs/*
+
+# Ignore TorchServe auto-generated key file
+ml_services/key_file.json
diff --git a/app/controllers/task_downloads_controller.rb b/app/controllers/task_downloads_controller.rb
index 51d9dd76ed..9bfecd4d1f 100644
--- a/app/controllers/task_downloads_controller.rb
+++ b/app/controllers/task_downloads_controller.rb
@@ -64,36 +64,18 @@ def predict_effort
       return
     end
 
-    uri = URI("http://localhost:8080/predictions/effort-predictor")
-    headers = {
-      "Content-Type" => "application/json",
-      "Authorization" => "Bearer #{ENV.fetch('TORCHSERVE_INFERENCE_KEY', nil)}"
-    }
-    body = { features: features }.to_json
-
-    response = Net::HTTP.post(uri, body, headers)
-    Rails.logger.info("TorchServe raw response: #{response.body}")
-
-    if response.is_a?(Net::HTTPSuccess)
-      prediction = begin
-        JSON.parse(response.body)
-      rescue StandardError
-        response.body
-      end
-      prediction_value =
-        case prediction
-        when Array
-          prediction.first
-        when Hash
-          prediction["predicted_effort"] || prediction.values.first
-        else
-          prediction
-        end
+    prediction_value = EffortPredictionService.predicted_effort(features)
 
+    if prediction_value
       render json: { predicted_effort: prediction_value }
     else
-      Rails.logger.error("TorchServe error: #{response.code} #{response.body}")
       render json: { error: "Prediction failed" }, status: :internal_server_error
     end
   end
+
+
+
 end
+
+
diff --git a/app/services/effort_prediction_service.rb b/app/services/effort_prediction_service.rb
index b2f0449f87..8530fcea74 100644
--- a/app/services/effort_prediction_service.rb
+++ b/app/services/effort_prediction_service.rb
@@ -28,4 +28,18 @@ def self.predict(features)
       nil
     end
   end
+
+  # Added a new helper method
+  def self.predicted_effort(features)
+    result = predict(features)
+    case result
+    when Array
+      result.first
+    when Hash
+      result["predicted_effort"] || result.values.first
+    else
+      result
+    end
+  end
+
 end
From 
beb91a7849f8e87a9add9a44d7b7c79b6da80cea Mon Sep 17 00:00:00 2001 From: officialid130-13e13 Date: Wed, 22 Apr 2026 17:39:36 +0530 Subject: [PATCH 5/6] Removed extra blank lines --- app/controllers/task_downloads_controller.rb | 5 ----- 1 file changed, 5 deletions(-) diff --git a/app/controllers/task_downloads_controller.rb b/app/controllers/task_downloads_controller.rb index 9bfecd4d1f..072c66240b 100644 --- a/app/controllers/task_downloads_controller.rb +++ b/app/controllers/task_downloads_controller.rb @@ -73,9 +73,4 @@ def predict_effort end end - - - end - - From e5de1ad60348dae6a2f6625260425470f08abc7e Mon Sep 17 00:00:00 2001 From: officialid130-13e13 Date: Wed, 22 Apr 2026 18:08:38 +0530 Subject: [PATCH 6/6] Removed pdfmanagement-testphase package from dockerfile to fix unit test failure --- texlive.Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/texlive.Dockerfile b/texlive.Dockerfile index c68816ce24..968abf3506 100644 --- a/texlive.Dockerfile +++ b/texlive.Dockerfile @@ -31,7 +31,8 @@ RUN apt-get update && \ ENV PATH=$PATH:/opt/texlive/bin/x86_64-linux:/opt/texlive/bin/aarch64-linux # Install required TeX Live packages for lualatex compilation -RUN tlmgr install \ +RUN tlmgr update --self && tlmgr update --all && tlmgr install \ +#RUN tlmgr install \ catchfile \ csvsimple \ environ \ @@ -53,7 +54,7 @@ RUN tlmgr install \ paralist \ pdfcol \ pdflscape \ - pdfmanagement-testphase \ + #pdfmanagement-testphase \ pdfpages \ tagpdf \ tcolorbox \