|
     GENERAL_MODEL_ID,
     MAIN_APP_ID,
     MAIN_APP_USER_ID,
-    _generate_model_outputs,
+    # _generate_model_outputs,
     aio_grpc_channel,
     async_post_model_outputs_and_maybe_allow_retries,
     async_raise_on_failure,
     both_channels,
     get_channel,
-    grpc_channel,
+    # grpc_channel,
     metadata,
     post_model_outputs_and_maybe_allow_retries,
     raise_on_failure,
|
     MODEL_TITLE_AND_ID_PAIRS,
     MULTIMODAL_MODEL_TITLE_AND_IDS,
     TEXT_HELSINKI_TRANSLATION_MODEL_TITLE_ID_DATA_TUPLE,
-    TEXT_LLM_MODEL_TITLE_IDS_TUPLE,
+    # TEXT_LLM_MODEL_TITLE_IDS_TUPLE,
     TEXT_MODEL_TITLE_IDS_TUPLE,
     TRANSLATION_TEST_DATA,
 )
@@ -120,62 +120,62 @@ def test_text_predict_on_public_models(channel_key, title, model_id, app_id, use
     )
 
 
-@grpc_channel()
-@pytest.mark.parametrize("title, model_id, app_id, user_id", TEXT_LLM_MODEL_TITLE_IDS_TUPLE)
-def test_text_predict_on_public_llm_models(channel_key, title, model_id, app_id, user_id):
-    channel = get_channel(channel_key)
-    if channel._target != "api.clarifai.com":
-        pytest.skip(f"Model not available in {channel._target}")
+# @grpc_channel()
+# @pytest.mark.parametrize("title, model_id, app_id, user_id", TEXT_LLM_MODEL_TITLE_IDS_TUPLE)
+# def test_text_predict_on_public_llm_models(channel_key, title, model_id, app_id, user_id):
+#     channel = get_channel(channel_key)
+#     if channel._target != "api.clarifai.com":
+#         pytest.skip(f"Model not available in {channel._target}")
 
-    stub = service_pb2_grpc.V2Stub(channel)
+#     stub = service_pb2_grpc.V2Stub(channel)
 
-    request = service_pb2.PostModelOutputsRequest(
-        user_app_id=resources_pb2.UserAppIDSet(user_id=user_id, app_id=app_id),
-        model_id=model_id,
-        inputs=[
-            resources_pb2.Input(
-                data=resources_pb2.Data(
-                    parts=[
-                        resources_pb2.Part(
-                            id="prompt",
-                            data=resources_pb2.Data(
-                                string_value=TRANSLATION_TEST_DATA["EN"],
-                            ),
-                        ),
-                        resources_pb2.Part(
-                            id="max_tokens",
-                            data=resources_pb2.Data(
-                                int_value=10,
-                            ),
-                        ),
-                        resources_pb2.Part(
-                            id="temperature",
-                            data=resources_pb2.Data(
-                                float_value=0.7,
-                            ),
-                        ),
-                        resources_pb2.Part(
-                            id="top_p",
-                            data=resources_pb2.Data(
-                                float_value=0.95,
-                            ),
-                        ),
-                    ]
-                )
-            )
-        ],
-    )
-    response_iterator = _generate_model_outputs(stub, request, metadata(pat=True))
+#     request = service_pb2.PostModelOutputsRequest(
+#         user_app_id=resources_pb2.UserAppIDSet(user_id=user_id, app_id=app_id),
+#         model_id=model_id,
+#         inputs=[
+#             resources_pb2.Input(
+#                 data=resources_pb2.Data(
+#                     parts=[
+#                         resources_pb2.Part(
+#                             id="prompt",
+#                             data=resources_pb2.Data(
+#                                 string_value=TRANSLATION_TEST_DATA["EN"],
+#                             ),
+#                         ),
+#                         resources_pb2.Part(
+#                             id="max_tokens",
+#                             data=resources_pb2.Data(
+#                                 int_value=10,
+#                             ),
+#                         ),
+#                         resources_pb2.Part(
+#                             id="temperature",
+#                             data=resources_pb2.Data(
+#                                 float_value=0.7,
+#                             ),
+#                         ),
+#                         resources_pb2.Part(
+#                             id="top_p",
+#                             data=resources_pb2.Data(
+#                                 float_value=0.95,
+#                             ),
+#                         ),
+#                     ]
+#                 )
+#             )
+#         ],
+#     )
+#     response_iterator = _generate_model_outputs(stub, request, metadata(pat=True))
 
-    responses_count = 0
-    for response in response_iterator:
-        responses_count += 1
-        raise_on_failure(
-            response,
-            custom_message=f"Text predict failed for the {title} model (ID: {model_id}).",
-        )
+#     responses_count = 0
+#     for response in response_iterator:
+#         responses_count += 1
+#         raise_on_failure(
+#             response,
+#             custom_message=f"Text predict failed for the {title} model (ID: {model_id}).",
+#         )
 
-    assert responses_count > 0
+#     assert responses_count > 0
 
 
 @aio_grpc_channel()
|
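For reference, a minimal sketch of the same multi-part LLM request that the commented-out test built, sent through the unary PostModelOutputs RPC instead of the test suite's streaming _generate_model_outputs helper. This is not part of the PR: the CLARIFAI_PAT environment variable and the MY_USER_ID / MY_APP_ID / MY_MODEL_ID placeholders are assumptions you would substitute with your own values.

# Hypothetical standalone sketch, not code from this PR.
import os

from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2

# Open a channel to api.clarifai.com and authenticate with a Personal Access Token.
channel = ClarifaiChannel.get_grpc_channel()
stub = service_pb2_grpc.V2Stub(channel)
pat_metadata = (("authorization", "Key " + os.environ["CLARIFAI_PAT"]),)

# Build a prompt plus inference parameters as named Parts, mirroring the removed test.
request = service_pb2.PostModelOutputsRequest(
    user_app_id=resources_pb2.UserAppIDSet(user_id="MY_USER_ID", app_id="MY_APP_ID"),
    model_id="MY_MODEL_ID",
    inputs=[
        resources_pb2.Input(
            data=resources_pb2.Data(
                parts=[
                    resources_pb2.Part(
                        id="prompt",
                        data=resources_pb2.Data(string_value="Hello, world"),
                    ),
                    resources_pb2.Part(
                        id="max_tokens",
                        data=resources_pb2.Data(int_value=10),
                    ),
                ]
            )
        )
    ],
)

# Unary call; the removed test instead iterated over a stream of responses.
response = stub.PostModelOutputs(request, metadata=pat_metadata)
if response.status.code != status_code_pb2.SUCCESS:
    raise RuntimeError(f"Predict failed: {response.status.description}")
print(response.outputs[0].data)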