File tree Expand file tree Collapse file tree 1 file changed +26
-0
lines changed
Expand file tree Collapse file tree 1 file changed +26
-0
lines changed Original file line number Diff line number Diff line change @@ -69,3 +69,29 @@ def load_model(version: str = "v2"):
6969 model .eval ()
7070 logger .info (f"✅ Model { version } loaded and set to eval mode." )
7171 return model , tokenizer , version
72+
73+
74+
def predict_text(model, tokenizer, text: str):
    """Classify *text* and return "positive", "negative", or "error".

    Supports two model flavors:
      * Hugging Face-style models paired with a tokenizer (``tokenizer`` truthy):
        the tokenizer builds the input batch and logits are read from
        ``outputs.logits``.
      * Custom quantized models with no tokenizer: a string input is encoded
        with a naive per-character byte fallback; any non-string input is
        assumed to already be a model-ready tensor and is passed through.

    Args:
        model: Callable returning logits (directly, or via an object with a
            ``.logits`` attribute for the tokenizer path).
        tokenizer: Hugging Face tokenizer, or None/falsy to use the raw path.
        text: Input text, or a pre-built tensor when no tokenizer exists.

    Returns:
        "positive" if the argmax class index is 1, "negative" otherwise, or
        "error" if anything raises during inference.
    """
    try:
        # One no_grad() context over every inference path. (The original
        # `logits = model(text)` branch ran outside no_grad and tracked
        # gradients during inference.)
        with torch.no_grad():
            if tokenizer:  # Hugging Face model or quantized model with tokenizer
                inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
                logits = model(**inputs).logits
            elif isinstance(text, str):
                # Naive fallback when no tokenizer exists: one byte value per
                # character, capped at 256 characters, as a 1 x L float batch.
                input_tensor = torch.tensor([[ord(c) % 256 for c in text[:256]]])
                logits = model(input_tensor.float())
            else:
                # Caller already supplied a model-ready input tensor.
                logits = model(text)

        # assumes a (batch, num_classes) logits layout with class 1 == positive
        pred = torch.argmax(logits, dim=1).item()
        return "positive" if pred == 1 else "negative"

    except Exception as e:
        # Top-level boundary: log and degrade gracefully instead of crashing.
        logger.error(f"Prediction error: {e}")
        return "error"
You can’t perform that action at this time.
0 commit comments