diff --git a/Project.toml b/Project.toml
index a214844..d3ab0ce 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,6 +1,6 @@
 name = "GoogleCloud"
 uuid = "55e21f81-8b0a-565e-b5ad-6816892a5ee7"
-version = "0.11.1"
+version = "0.11.2"
 
 [deps]
 Base64 = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
@@ -10,6 +10,7 @@ JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
 Libz = "2ec943e9-cfe8-584d-b93d-64dcb6d567b7"
 Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a"
 MbedTLS = "739be429-bea8-5141-9913-cc70e7f3736d"
+Mocking = "78c3b35d-d492-501b-9361-3d52fe80e533"
 MsgPack = "99f44e22-a591-53d1-9472-aa23ef4bd671"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 
diff --git a/src/GoogleCloud.jl b/src/GoogleCloud.jl
index 0b77083..2c80981 100644
--- a/src/GoogleCloud.jl
+++ b/src/GoogleCloud.jl
@@ -7,7 +7,9 @@ export
     JSONCredentials, MetadataCredentials, GoogleSession, authorize,
     set_session!, get_session
 export
-    iam, storage, compute, container, pubsub, logging, datastore
+    iam, storage, compute, container, pubsub, logging, datastore, text_service
+export
+    BISON_TEXT_MODEL_NAME, GEKKO_EMBEDDING_MODEL_NAME
 export
     KeyStore, commit!, fetch!, sync!, clearcache!, clearpending!, destroy!,
     connect!, watch, unwatch
@@ -33,6 +35,9 @@ import .api:
     _container.container,
     _pubsub.pubsub,
     _logging.logging,
-    _datastore.datastore
+    _datastore.datastore,
+    _text_service.text_service
+
+using .api._text_service: BISON_TEXT_MODEL_NAME, GEKKO_EMBEDDING_MODEL_NAME
 
 end
diff --git a/src/api/api.jl b/src/api/api.jl
index 837c936..3afd7ff 100644
--- a/src/api/api.jl
+++ b/src/api/api.jl
@@ -12,6 +12,7 @@ import MbedTLS
 import Libz
 import JSON
 using Markdown
+using Mocking
 using ..session
 using ..error
 
@@ -238,7 +239,7 @@ function execute(session::GoogleSession, resource::APIResource, method::APIMetho
     end
 
     # obtain and use access token
-    auth = authorize(session)
+    auth = @mock authorize(session)
    headers = Dict{String, String}(
         "Authorization" => "$(auth[:token_type]) $(auth[:access_token])"
     )
@@ -288,7 +289,7 @@ function execute(session::GoogleSession, resource::APIResource, method::APIMetho
             @info("Attempt: $attempt")
         end
         res = try
-            HTTP.request(string(method.verb),
+            @mock HTTP.request(string(method.verb),
                 path_replace(method.path, path_args), headers, data; query=params
             )
         catch e
@@ -362,5 +363,6 @@ include("container.jl")
 include("pubsub.jl")
 include("logging.jl")
 include("datastore.jl")
+include("text_service.jl")
 
 end
diff --git a/src/api/text_service.jl b/src/api/text_service.jl
new file mode 100644
index 0000000..cfe04dd
--- /dev/null
+++ b/src/api/text_service.jl
@@ -0,0 +1,24 @@
+module _text_service
+
+export text_service, BISON_TEXT_MODEL_NAME, GEKKO_EMBEDDING_MODEL_NAME
+
+using ..api
+using ...root
+
+const BISON_TEXT_MODEL_NAME = "text-bison"
+const GEKKO_EMBEDDING_MODEL_NAME = "textembedding-gecko"
+
+
+text_service = APIRoot(
+    "https://{region}-aiplatform.googleapis.com/v1/projects/{project_id}",
+    Dict{String,String}(
+        "cloud-platform" => "Full access",
+        "cloud-platform.read-only" => "Read only"
+    ),
+    PALM=APIResource(
+        "locations/{region}/publishers/google/models/{model_name}:predict",
+        predict=APIMethod(:POST, "", "Perform an online prediction.")
+    )
+)
+
+end
diff --git a/test/fixtures/text_service_response.json b/test/fixtures/text_service_response.json
new file mode 100644
index 0000000..7416dca
--- /dev/null
+++ b/test/fixtures/text_service_response.json
@@ -0,0 +1,59 @@
+{
+  "metadata": {
+    "tokenMetadata": {
+      "inputTokenCount": {
+        "totalTokens": 3,
+        "totalBillableCharacters": 17
+      },
+      "outputTokenCount": {
+        "totalTokens": 200,
+        "totalBillableCharacters": 837
+      }
+    }
+  },
+  "predictions": [
+    {
+      "safetyAttributes": {
+        "scores": [
+          0.1,
+          0.1
+        ],
+        "categories": [
+          "Finance",
+          "Health"
+        ],
+        "safetyRatings": [
+          {
+            "probabilityScore": 0,
+            "severity": "NEGLIGIBLE",
+            "category": "Dangerous Content",
+            "severityScore": 0
+          },
+          {
+            "probabilityScore": 0,
+            "severity": "NEGLIGIBLE",
+            "category": "Harassment",
+            "severityScore": 0
+          },
+          {
+            "probabilityScore": 0,
+            "severity": "NEGLIGIBLE",
+            "category": "Hate Speech",
+            "severityScore": 0
+          },
+          {
+            "probabilityScore": 0,
+            "severity": "NEGLIGIBLE",
+            "category": "Sexually Explicit",
+            "severityScore": 0
+          }
+        ],
+        "blocked": false
+      },
+      "content": " As an AI language model, I don't have a physical presence or personal experiences like humans do. However, I can provide you with information about my capabilities and the technology behind me.\n\nI am a large language model trained by Google. My training data includes a vast corpus of text and code from the web, books, and other sources. This training enables me to understand and generate human language, answer questions, write creatively, and assist with various tasks.\n\nMy responses are based on the patterns and knowledge I have learned from the training data. I do not have emotions, opinions, or personal biases. My goal is to provide accurate and informative responses based on the information available to me.\n\nI am continuously learning and improving as I interact with users and receive feedback. The more I am used, the better I become at understanding and responding to human requests.\n\nIf you have any specific questions or tasks you would like assistance with, feel free to ask. I'll do my best",
+      "citationMetadata": {
+        "citations": []
+      }
+    }
+  ]
+}
diff --git a/test/runtests.jl b/test/runtests.jl
index e2f88af..9d582b2 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,5 +1,6 @@
 using GoogleCloud
-using Test
+using Test
 
 include("api.jl")
-#include("storage.jl")
+# include("storage.jl")
+include("text_service.jl")
diff --git a/test/text_service.jl b/test/text_service.jl
new file mode 100644
index 0000000..bd12f3c
--- /dev/null
+++ b/test/text_service.jl
@@ -0,0 +1,55 @@
+using GoogleCloud
+using Mocking
+using Test
+using HTTP
+using JSON
+
+Mocking.activate()
+
+const FIXTURES_DIR = joinpath(@__DIR__, "fixtures")
+
+model_params = (
+    temperature=0.7,
+    maxOutputTokens=200,
+    topP=0.7,
+    topK=40
+)
+
+params = Dict(
+    :instances => [
+        Dict(:prompt => "Tell about yourself")
+    ],
+    :parameters => model_params
+)
+
+http_response_mock = HTTP.Response(
+    200,
+    Dict("Content-Type" => "application/json"),
+    read(joinpath(FIXTURES_DIR, "text_service_response.json"))
+)
+
+authorize_response_mock = Dict(:access_token => "test-token", :token_type => "Bearer")
+
+@testset "Testing text_service" begin
+    http_patch = @patch HTTP.request(args...; kwargs...) = http_response_mock
+    authorize_patch = @patch GoogleCloud.api.authorize(_session) = authorize_response_mock
+
+    default_region = "us-central1"
+    project_id = "test-project-id"
+
+    apply([http_patch, authorize_patch]) do
+        response = text_service(
+            :PALM,
+            :predict,
+            default_region,
+            project_id,
+            default_region,
+            GoogleCloud.BISON_TEXT_MODEL_NAME,
+            data=params
+        )
+
+        @test response isa AbstractDict
+        @test haskey(response, :predictions)
+        @test haskey(response, :metadata)
+    end
+end
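For context, a minimal usage sketch of the new API surface (not part of the diff; the credentials path, project ID, and region below are placeholders, and it follows the package's existing JSONCredentials / GoogleSession / set_session! flow):

using GoogleCloud

# Authenticate with a service-account key and register a default session
# for the text_service API root (placeholder key path; "cloud-platform" is
# the scope declared in src/api/text_service.jl).
creds = JSONCredentials(expanduser("~/credentials.json"))
session = GoogleSession(creds, ["cloud-platform"])
set_session!(text_service, session)

# Call the PaLM text model via the new :PALM resource; the positional
# arguments fill {region}, {project_id}, {region}, {model_name} in order,
# as in the test above.
response = text_service(
    :PALM, :predict,
    "us-central1",       # {region} in the API root URL (placeholder)
    "my-project-id",     # {project_id} (placeholder)
    "us-central1",       # {region} in the resource path (placeholder)
    BISON_TEXT_MODEL_NAME,
    data=Dict(
        :instances  => [Dict(:prompt => "Tell about yourself")],
        :parameters => (temperature=0.7, maxOutputTokens=200)
    )
)

# The response is parsed JSON keyed by Symbols (as the test asserts).
println(response[:predictions][1][:content])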