Skip to content

Commit 3e5c3de

Browse files
authored
Merge pull request #108 from Aleph-Alpha/v3
v3 Release
2 parents e25bdd5 + c273d92 commit 3e5c3de

22 files changed

+152
-1755
lines changed

Changelog.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,17 @@
11
# Changelog
22

3+
## 3.0.0
4+
5+
### Breaking Changes
6+
7+
- Removed deprecated `AlephAlphaClient` and `AlephAlphaModel`. Use `Client` or `AsyncClient` instead.
8+
- Removed deprecated `ImagePrompt`. Import `Image` instead for image prompt items.
9+
- New Q&A interface. We've improved the Q&A implementation, and most parameters are no longer needed.
10+
- You only need to specify your documents, a query, and (optional) the max number of answers you want to receive.
11+
- You no longer specify a model.
12+
- Removed the "model" parameter from the summarize method.
13+
- Removed "model_version" from `SummarizationResponse`.
14+
315
## 2.17.0
416

517
### Features

aleph_alpha_client/__init__.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
ControlTokenOverlap,
33
Image,
44
ImageControl,
5-
ImagePrompt,
65
Prompt,
76
Text,
87
TextControl,
@@ -11,12 +10,10 @@
1110
)
1211
from .aleph_alpha_client import (
1312
POOLING_OPTIONS,
14-
AlephAlphaClient,
1513
AsyncClient,
1614
Client,
1715
QuotaError,
1816
)
19-
from .aleph_alpha_model import AlephAlphaModel
2017
from .completion import CompletionRequest, CompletionResponse
2118
from .detokenization import DetokenizationRequest, DetokenizationResponse
2219
from .document import Document
@@ -52,8 +49,6 @@
5249
from .version import __version__
5350

5451
__all__ = [
55-
"AlephAlphaClient",
56-
"AlephAlphaModel",
5752
"AsyncClient",
5853
"Client",
5954
"CompletionRequest",
@@ -73,7 +68,6 @@
7368
"ExplanationResponse",
7469
"Image",
7570
"ImageControl",
76-
"ImagePrompt",
7771
"ImagePromptItemExplanation",
7872
"ImageScore",
7973
"POOLING_OPTIONS",

aleph_alpha_client/aleph_alpha_client.py

Lines changed: 8 additions & 829 deletions
Large diffs are not rendered by default.

aleph_alpha_client/aleph_alpha_model.py

Lines changed: 0 additions & 172 deletions
This file was deleted.

aleph_alpha_client/explanation.py

Lines changed: 80 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@
1212
# Import Literal with Python 3.7 fallback
1313
from typing_extensions import Literal
1414

15+
from aleph_alpha_client import Text
16+
1517
from aleph_alpha_client.prompt import ControlTokenOverlap, Image, Prompt, PromptItem
1618

1719

@@ -204,6 +206,20 @@ def from_json(score: Any) -> "TextScore":
204206
score=score["score"],
205207
)
206208

209+
class TextScoreWithRaw(NamedTuple):
    """A text-prompt explanation score enriched with the raw substring it covers.

    Mirrors TextScore (start, length, score) and adds `text`, the exact
    slice of the prompt text that the score refers to.
    """

    start: int
    length: int
    score: float
    text: str

    @staticmethod
    def from_text_score(score: TextScore, prompt: Text) -> "TextScoreWithRaw":
        """Copy *score* and attach the scored slice of *prompt*'s text."""
        end = score.start + score.length
        return TextScoreWithRaw(
            start=score.start,
            length=score.length,
            score=score.score,
            text=prompt.text[score.start:end],
        )
207223

208224
class ImageScore(NamedTuple):
209225
left: float
@@ -236,6 +252,20 @@ def from_json(score: Any) -> "TargetScore":
236252
score=score["score"],
237253
)
238254

255+
class TargetScoreWithRaw(NamedTuple):
    """A target explanation score enriched with the raw substring it covers.

    Mirrors TargetScore (start, length, score) and adds `text`, the exact
    slice of the target string that the score refers to.
    """

    start: int
    length: int
    score: float
    text: str

    @staticmethod
    def from_target_score(score: TargetScore, target: str) -> "TargetScoreWithRaw":
        """Copy *score* and attach the scored slice of *target*."""
        end = score.start + score.length
        return TargetScoreWithRaw(
            start=score.start,
            length=score.length,
            score=score.score,
            text=target[score.start:end],
        )
239269

240270
class TokenScore(NamedTuple):
241271
score: float
@@ -275,23 +305,37 @@ def in_pixels(self, prompt_item: PromptItem) -> "ImagePromptItemExplanation":
275305

276306

277307
class TextPromptItemExplanation(NamedTuple):
    """Explanation scores for a single text item of the prompt."""

    scores: List[Union[TextScore, TextScoreWithRaw]]

    @staticmethod
    def from_json(item: Dict[str, Any]) -> "TextPromptItemExplanation":
        """Deserialize the API payload for one text prompt item."""
        parsed = [TextScore.from_json(raw) for raw in item["scores"]]
        return TextPromptItemExplanation(scores=parsed)

    def with_text(self, prompt: Text) -> "TextPromptItemExplanation":
        """Return a copy where each plain TextScore also carries its raw text."""
        enriched = [
            TextScoreWithRaw.from_text_score(score, prompt)
            if isinstance(score, TextScore)
            else score
            for score in self.scores
        ]
        return TextPromptItemExplanation(scores=enriched)
320+
285321

286322

287323
class TargetPromptItemExplanation(NamedTuple):
    """Explanation scores for the completion target."""

    scores: List[Union[TargetScore, TargetScoreWithRaw]]

    @staticmethod
    def from_json(item: Dict[str, Any]) -> "TargetPromptItemExplanation":
        """Deserialize the API payload for the target item."""
        parsed = [TargetScore.from_json(raw) for raw in item["scores"]]
        return TargetPromptItemExplanation(scores=parsed)

    def with_text(self, prompt: str) -> "TargetPromptItemExplanation":
        """Return a copy where each plain TargetScore also carries its raw text.

        NOTE(review): the parameter holds the *target* string, despite being
        named `prompt` — confirm and consider renaming in a breaking release.
        """
        enriched = [
            TargetScoreWithRaw.from_target_score(score, prompt)
            if isinstance(score, TargetScore)
            else score
            for score in self.scores
        ]
        return TargetPromptItemExplanation(scores=enriched)
336+
337+
338+
295339

296340

297341
class TokenPromptItemExplanation(NamedTuple):
@@ -352,6 +396,31 @@ def with_image_prompt_items_in_pixels(self, prompt: Prompt) -> "Explanation":
352396
],
353397
)
354398

399+
def with_text_from_prompt(self, prompt: Prompt, target: str) -> "Explanation":
    """Return a copy where text and target scores carry their raw substrings.

    NOTE(review): assumes `self.items` aligns index-for-index with
    `prompt.items` for text items — confirm against the API contract.
    """

    def enrich(index: int, item):
        # Text items: only enrich when the prompt item really is Text,
        # since only Text exposes the raw string to slice.
        if isinstance(item, TextPromptItemExplanation):
            source = prompt.items[index]
            if isinstance(source, Text):
                return item.with_text(source)
            return item
        if isinstance(item, TargetPromptItemExplanation):
            return item.with_text(target)
        return item

    enriched: List[Union[
        TextPromptItemExplanation,
        ImagePromptItemExplanation,
        TargetPromptItemExplanation,
        TokenPromptItemExplanation,
    ]] = [enrich(i, item) for i, item in enumerate(self.items)]
    return Explanation(target=self.target, items=enriched)
422+
423+
355424

356425
class ExplanationResponse(NamedTuple):
357426
model_version: str
@@ -375,3 +444,12 @@ def with_image_prompt_items_in_pixels(
375444
for explanation in self.explanations
376445
]
377446
return ExplanationResponse(self.model_version, mapped_explanations)
447+
448+
def with_text_from_prompt(
    self, request: ExplanationRequest
) -> "ExplanationResponse":
    """Return a copy whose explanations carry the raw prompt/target text.

    Delegates to Explanation.with_text_from_prompt for every explanation,
    using the prompt and target from the original *request*.
    """
    enriched = [
        explanation.with_text_from_prompt(request.prompt, request.target)
        for explanation in self.explanations
    ]
    return ExplanationResponse(self.model_version, enriched)

0 commit comments

Comments
 (0)