diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index b82d544a..e09f1871 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -3,9 +3,9 @@
name: CI
on:
pull_request:
- branches: [ main, '**' ]
+ branches: [ master, '**' ]
push:
- branches: [ main ]
+ branches: [ master ]
jobs:
validate:
@@ -32,7 +32,7 @@ jobs:
strategy:
fail-fast: true
matrix:
- scala: [ '2.12.15', '2.13.10', '3.2.2' ]
+ scala: [ '2.12.15', '2.13.11', '3.2.2' ]
steps:
- uses: actions/checkout@v3
@@ -41,23 +41,17 @@ jobs:
uses: actions/setup-java@v3
with:
distribution: 'corretto'
- java-version: '8'
+ java-version: '11'
cache: 'sbt'
- name: Build & Test
- run: sbt ++${{ matrix.scala }} testWithCoverage
-
- - name: Upload coverage report (Cobertura)
- uses: actions/upload-artifact@v3.1.0
- with:
- name: cobertura.xml
- path: ${{github.workspace}}/target/scala-2.13/coverage-report/cobertura.xml
+ run: sbt ++${{ matrix.scala }} clean testWithCoverage
- - name: Upload coverage report (HTML)
+ - name: Upload coverage report (all)
uses: actions/upload-artifact@v3.1.0
with:
- name: scoverage-report-html
- path: ${{github.workspace}}/target/scala-2.13/scoverage-report/
+ name: coverage-report-${{ matrix.scala }}
+ path: ${{github.workspace}}/target/scala-${{ matrix.scala }}/coverage-report
optional-build:
name: Build (Optional)
@@ -70,12 +64,12 @@ jobs:
matrix:
distribution: [ 'corretto' ]
jdk: [ '11' ]
- scala: [ '2.12.15', '2.13.10', '3.2.2' ]
+ scala: [ '2.12.15', '2.13.11', '3.2.2' ]
experimental: [ false ]
include:
- jdk: '17'
distribution: 'corretto'
- scala: '2.13.10'
+ scala: '2.13.11'
experimental: true
steps:
@@ -90,35 +84,35 @@ jobs:
cache: 'sbt'
- name: Perform Build / Test
- run: sbt ++${{ matrix.scala }} compile test
-
- coverage:
- name: Coverage Report
- if: ${{ github.event.pull_request }}
- needs: [ build ]
- runs-on: ubuntu-latest
- steps:
- - uses: actions/download-artifact@v3
- with:
- name: cobertura.xml
-
- - name: Analyzing coverage report
- uses: 5monkeys/cobertura-action@master
- with:
- path: cobertura.xml
- only_changed_files: true
- fail_below_threshold: true
- show_missing: true
- show_line: true
- show_branch: true
- show_class_names: true
- link_missing_lines: true
- minimum_coverage: 75
+ run: sbt ++${{ matrix.scala }} clean compile test
+
+# coverage:
+# name: Coverage Report
+# if: ${{ github.event.pull_request }}
+# needs: [ build ]
+# runs-on: ubuntu-latest
+# steps:
+# - uses: actions/download-artifact@v3
+# with:
+# name: cobertura.xml
+#
+# - name: Analyzing coverage report
+# uses: 5monkeys/cobertura-action@master
+# with:
+# path: cobertura.xml
+# only_changed_files: true
+# fail_below_threshold: true
+# show_missing: true
+# show_line: true
+# show_branch: true
+# show_class_names: true
+# link_missing_lines: true
+# minimum_coverage: 75
ready-to-merge:
name: Ready to Merge
if: ${{ github.event.pull_request }}
- needs: [ optional-build, coverage ]
+ needs: [ optional-build ]
runs-on: ubuntu-latest
steps:
- run: echo 'Ready to merge.'
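
`testWithCoverage` is an sbt command alias defined in build.sbt, not shown in this hunk. Below is a minimal sketch of such an alias, assuming the sbt-scoverage plugin. Note also that the upload path `target/scala-${{ matrix.scala }}` uses the full Scala version, while sbt's default output directory for Scala 2 uses the binary version (e.g. `target/scala-2.13`), so that path is worth double-checking.

```scala
// build.sbt sketch (assumes sbt-scoverage); illustrative only, the actual
// alias lives elsewhere in this repo.
addCommandAlias(
  "testWithCoverage",
  List(
    "coverage",        // instrument the sources
    "test",            // run the suites against instrumented classes
    "coverageReport"   // write cobertura.xml & co. under <crossTarget>/coverage-report
  ).mkString(";")
)
```
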
diff --git a/build.sbt b/build.sbt
index f1820132..b8731b0a 100755
--- a/build.sbt
+++ b/build.sbt
@@ -4,6 +4,7 @@ import sbt.Keys.test
val scala212 = "2.12.18"
val scala213 = "2.13.11"
val scala3 = "3.2.2"
+lazy val supportedScalaVersions = List(scala212, scala213, scala3)
ThisBuild / organization := "io.cequence"
ThisBuild / scalaVersion := scala212
@@ -11,18 +12,22 @@ ThisBuild / version := "0.4.0"
ThisBuild / isSnapshot := false
lazy val core = (project in file("openai-core"))
+ .settings(crossScalaVersions := supportedScalaVersions)
lazy val client = (project in file("openai-client"))
.dependsOn(core)
.aggregate(core)
+ .settings(crossScalaVersions := supportedScalaVersions)
lazy val client_stream = (project in file("openai-client-stream"))
.dependsOn(client)
.aggregate(client)
+ .settings(crossScalaVersions := supportedScalaVersions)
lazy val guice = (project in file("openai-guice"))
.dependsOn(client)
.aggregate(client_stream)
+ .settings(crossScalaVersions := supportedScalaVersions)
// POM settings for Sonatype
ThisBuild / homepage := Some(
@@ -86,7 +91,6 @@ addCommandAlias(
).mkString(";")
)
-
inThisBuild(
List(
scalacOptions += "-Ywarn-unused",
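
The four `.settings(crossScalaVersions := supportedScalaVersions)` calls are identical. A sketch of the same setup routed through a small helper, illustrative only (the repo keeps the explicit per-project calls):

```scala
// build.sbt sketch: one helper so new modules cannot forget to opt into
// cross-building; "module" is a hypothetical name.
def module(id: String, base: String): Project =
  Project(id, file(base)).settings(crossScalaVersions := supportedScalaVersions)

lazy val core   = module("core", "openai-core")
lazy val client = module("client", "openai-client").dependsOn(core).aggregate(core)
```
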
diff --git a/openai-client-stream/build.sbt b/openai-client-stream/build.sbt
index 7da23573..f66e16c6 100644
--- a/openai-client-stream/build.sbt
+++ b/openai-client-stream/build.sbt
@@ -5,5 +5,5 @@ description := "Stream support for the OpenAI Scala client."
val akkaHttpVersion = "10.5.0-M1" // TODO: migrate to 10.5.1
libraryDependencies ++= Seq(
- "com.typesafe.akka" %% "akka-http" % akkaHttpVersion // JSON WS Streaming
-)
\ No newline at end of file
+ "com.typesafe.akka" %% "akka-http" % akkaHttpVersion // JSON WS Streaming
+)
diff --git a/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceStreamedExtra.scala b/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceStreamedExtra.scala
index d009655a..09314397 100644
--- a/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceStreamedExtra.scala
+++ b/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceStreamedExtra.scala
@@ -2,53 +2,74 @@ package io.cequence.openaiscala.service
import akka.NotUsed
import akka.stream.scaladsl.Source
-import io.cequence.openaiscala.domain.{FunctionSpec, MessageSpec}
-import io.cequence.openaiscala.domain.response.{ChatCompletionChunkResponse, ChatCompletionResponse, FineTuneEvent, TextCompletionResponse}
-import io.cequence.openaiscala.domain.settings.{CreateChatCompletionSettings, CreateCompletionSettings}
-
-import scala.concurrent.Future
+import io.cequence.openaiscala.domain.MessageSpec
+import io.cequence.openaiscala.domain.response.{
+ ChatCompletionChunkResponse,
+ FineTuneEvent,
+ TextCompletionResponse
+}
+import io.cequence.openaiscala.domain.settings.{
+ CreateChatCompletionSettings,
+ CreateCompletionSettings
+}
trait OpenAIServiceStreamedExtra extends OpenAIServiceConsts {
- /**
- * Creates a completion for the provided prompt and parameters with streamed results.
- *
- * @param prompt The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
- Note that <|endoftext|> is the document separator that the model sees during training,
- so if a prompt is not specified the model will generate as if from the beginning of a new document.
- * @param settings
- * @return text completion response as a stream (source)
- *
- * @see OpenAI Doc
- */
+ /** Creates a completion for the provided prompt and parameters with streamed
+ * results.
+ *
+ * @param prompt
+ * The prompt(s) to generate completions for, encoded as a string, array of
+ * strings, array of tokens, or array of token arrays. Note that
+ * <|endoftext|> is the document separator that the model sees during
+ * training, so if a prompt is not specified the model will generate as if
+ * from the beginning of a new document.
+ * @param settings
+ * @return
+ * text completion response as a stream (source)
+ *
+ * @see
+ * OpenAI
+ * Doc
+ */
def createCompletionStreamed(
- prompt: String,
- settings: CreateCompletionSettings = DefaultSettings.CreateCompletion
+ prompt: String,
+ settings: CreateCompletionSettings = DefaultSettings.CreateCompletion
): Source[TextCompletionResponse, NotUsed]
- /**
- * Creates a completion for the chat message(s) with streamed results.
- *
- * @param messages A list of messages comprising the conversation so far.
- * @param settings
- * @return chat completion response
- *
- * @see OpenAI Doc
- */
+ /** Creates a completion for the chat message(s) with streamed results.
+ *
+ * @param messages
+ * A list of messages comprising the conversation so far.
+ * @param settings
+ * @return
+ * chat completion response
+ *
+ * @see
+ * OpenAI
+ * Doc
+ */
def createChatCompletionStreamed(
- messages: Seq[MessageSpec],
- settings: CreateChatCompletionSettings = DefaultSettings.CreateChatCompletion
+ messages: Seq[MessageSpec],
+ settings: CreateChatCompletionSettings =
+ DefaultSettings.CreateChatCompletion
): Source[ChatCompletionChunkResponse, NotUsed]
- /**
- * Get fine-grained status updates for a fine-tune job with streamed results.
- *
- * @param fineTuneId The ID of the fine-tune job to get events for.
- * @return fine tune events or None if not found as a stream (source)
- *
- * @see OpenAI Doc
- */
+ /** Get fine-grained status updates for a fine-tune job with streamed results.
+ *
+ * @param fineTuneId
+ * The ID of the fine-tune job to get events for.
+ * @return
+ * fine tune events or None if not found as a stream (source)
+ *
+ * @see
+ * OpenAI
+ * Doc
+ */
def listFineTuneEventsStreamed(
- fineTuneId: String
+ fineTuneId: String
): Source[FineTuneEvent, NotUsed]
}
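
The streamed variants return Akka Streams `Source`s instead of `Future`s, so chunks can be consumed as they arrive. A minimal consumer sketch; it assumes each chunk choice carries a `delta: ChunkMessageSpec` with an optional `content`, per the chunk types referenced later in this diff:

```scala
import akka.actor.ActorSystem
import akka.stream.Materializer
import io.cequence.openaiscala.domain.{ChatRole, MessageSpec}
import io.cequence.openaiscala.service.OpenAIServiceStreamedFactory
import scala.concurrent.ExecutionContext

object StreamedChatDemo extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val materializer: Materializer = Materializer(system)

  val service = OpenAIServiceStreamedFactory(apiKey = sys.env("OPENAI_API_KEY"))

  // Print each delta as it arrives; the stream completes after the [DONE] token.
  service
    .createChatCompletionStreamed(
      Seq(MessageSpec(ChatRole.User, "Write a haiku about streams."))
    )
    .runForeach(chunk => chunk.choices.headOption.flatMap(_.delta.content).foreach(print))
    .onComplete(_ => system.terminate())
}
```
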
diff --git a/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceStreamedImpl.scala b/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceStreamedImpl.scala
index 169c5ec3..680a9826 100644
--- a/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceStreamedImpl.scala
+++ b/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceStreamedImpl.scala
@@ -10,52 +10,62 @@ import io.cequence.openaiscala.domain.response._
import io.cequence.openaiscala.service.ws.{Timeouts, WSStreamRequestHelper}
import io.cequence.openaiscala.OpenAIScalaClientException
import io.cequence.openaiscala.domain.MessageSpec
-import play.api.libs.json.{JsValue, Json}
+import play.api.libs.json.JsValue
import scala.concurrent.ExecutionContext
-/**
- * Private impl. class of [[OpenAIServiceStreamedExtra]] which offers extra functions with streaming support.
- *
- * @since Jan 2023
- */
-private trait OpenAIServiceStreamedExtraImpl extends OpenAIServiceStreamedExtra with WSStreamRequestHelper {
+/** Private impl. class of [[OpenAIServiceStreamedExtra]] which offers extra
+ * functions with streaming support.
+ *
+ * @since Jan
+ * 2023
+ */
+private trait OpenAIServiceStreamedExtraImpl
+ extends OpenAIServiceStreamedExtra
+ with WSStreamRequestHelper {
this: OpenAIServiceImpl =>
override def createCompletionStreamed(
- prompt: String,
- settings: CreateCompletionSettings
+ prompt: String,
+ settings: CreateCompletionSettings
): Source[TextCompletionResponse, NotUsed] =
execJsonStreamAux(
EndPoint.completions,
"POST",
- bodyParams = createBodyParamsForCompletion(prompt, settings, stream = true)
+ bodyParams =
+ createBodyParamsForCompletion(prompt, settings, stream = true)
).map { (json: JsValue) =>
- (json \ "error").toOption.map { error =>
- throw new OpenAIScalaClientException(error.toString())
- }.getOrElse(
- json.asSafe[TextCompletionResponse]
- )
+ (json \ "error").toOption
+ .map { error =>
+ throw new OpenAIScalaClientException(error.toString())
+ }
+ .getOrElse(
+ json.asSafe[TextCompletionResponse]
+ )
}
override def createChatCompletionStreamed(
- messages: Seq[MessageSpec],
- settings: CreateChatCompletionSettings = DefaultSettings.CreateChatCompletion
+ messages: Seq[MessageSpec],
+ settings: CreateChatCompletionSettings =
+ DefaultSettings.CreateChatCompletion
): Source[ChatCompletionChunkResponse, NotUsed] =
execJsonStreamAux(
EndPoint.chat_completions,
"POST",
- bodyParams = createBodyParamsForChatCompletion(messages, settings, stream = true)
+ bodyParams =
+ createBodyParamsForChatCompletion(messages, settings, stream = true)
).map { (json: JsValue) =>
- (json \ "error").toOption.map { error =>
- throw new OpenAIScalaClientException(error.toString())
- }.getOrElse(
- json.asSafe[ChatCompletionChunkResponse]
- )
+ (json \ "error").toOption
+ .map { error =>
+ throw new OpenAIScalaClientException(error.toString())
+ }
+ .getOrElse(
+ json.asSafe[ChatCompletionChunkResponse]
+ )
}
override def listFineTuneEventsStreamed(
- fineTuneId: String
+ fineTuneId: String
): Source[FineTuneEvent, NotUsed] =
execJsonStreamAux(
EndPoint.fine_tunes,
@@ -65,21 +75,29 @@ private trait OpenAIServiceStreamedExtraImpl extends OpenAIServiceStreamedExtra
Param.stream -> Some(true)
)
).map { json =>
- (json \ "error").toOption.map { error =>
- throw new OpenAIScalaClientException(error.toString())
- }.getOrElse(
- json.asSafe[FineTuneEvent]
- )
+ (json \ "error").toOption
+ .map { error =>
+ throw new OpenAIScalaClientException(error.toString())
+ }
+ .getOrElse(
+ json.asSafe[FineTuneEvent]
+ )
}
}
-object OpenAIServiceStreamedFactory extends OpenAIServiceFactoryHelper[OpenAIService with OpenAIServiceStreamedExtra] {
+object OpenAIServiceStreamedFactory
+ extends OpenAIServiceFactoryHelper[
+ OpenAIService with OpenAIServiceStreamedExtra
+ ] {
override def apply(
- apiKey: String,
- orgId: Option[String] = None,
- timeouts: Option[Timeouts] = None)(
- implicit ec: ExecutionContext, materializer: Materializer
+ apiKey: String,
+ orgId: Option[String] = None,
+ timeouts: Option[Timeouts] = None
+ )(implicit
+ ec: ExecutionContext,
+ materializer: Materializer
): OpenAIService with OpenAIServiceStreamedExtra =
- new OpenAIServiceImpl(apiKey, orgId, timeouts) with OpenAIServiceStreamedExtraImpl
-}
\ No newline at end of file
+ new OpenAIServiceImpl(apiKey, orgId, timeouts)
+ with OpenAIServiceStreamedExtraImpl
+}
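
All three streamed endpoints repeat the same `(json \ "error")` check before deserializing. A hypothetical helper that would collapse the duplication (not part of this PR):

```scala
import io.cequence.openaiscala.JsonUtil.JsonOps
import io.cequence.openaiscala.OpenAIScalaClientException
import play.api.libs.json.{JsValue, Reads}

object StreamedJsonErrorHandling {
  // Fail fast on an OpenAI "error" payload, otherwise deserialize the chunk;
  // mirrors the inline pattern used three times above.
  def chunkOrError[T: Reads](json: JsValue): T =
    (json \ "error").toOption
      .map(error => throw new OpenAIScalaClientException(error.toString()))
      .getOrElse(json.asSafe[T])
}
```
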
diff --git a/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/ws/WSStreamRequestHelper.scala b/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/ws/WSStreamRequestHelper.scala
index b095a376..0f035fdc 100644
--- a/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/ws/WSStreamRequestHelper.scala
+++ b/openai-client-stream/src/main/scala/io/cequence/openaiscala/service/ws/WSStreamRequestHelper.scala
@@ -1,47 +1,52 @@
package io.cequence.openaiscala.service.ws
import akka.NotUsed
-import akka.http.scaladsl.common.{EntityStreamingSupport, JsonEntityStreamingSupport}
+import akka.http.scaladsl.common.EntityStreamingSupport
import akka.http.scaladsl.unmarshalling.{Unmarshal, Unmarshaller}
import akka.stream.Materializer
import akka.stream.scaladsl.Framing.FramingException
import akka.stream.scaladsl.{Flow, Framing, Source}
import akka.util.ByteString
import com.fasterxml.jackson.core.JsonParseException
-import io.cequence.openaiscala.{OpenAIScalaClientException, OpenAIScalaClientTimeoutException, OpenAIScalaClientUnknownHostException}
-import play.api.libs.json.{JsNull, JsObject, JsString, JsValue, Json}
+import io.cequence.openaiscala.{
+ OpenAIScalaClientException,
+ OpenAIScalaClientTimeoutException,
+ OpenAIScalaClientUnknownHostException
+}
+import play.api.libs.json.{JsObject, JsString, JsValue, Json}
import play.api.libs.ws.JsonBodyWritables._
import java.net.UnknownHostException
import java.util.concurrent.TimeoutException
-/**
- * Stream request support specifically tailored for OpenAI API.
- *
- * @since Feb 2023
- */
+/** Stream request support specifically tailored for OpenAI API.
+ *
+ * @since Feb
+ * 2023
+ */
trait WSStreamRequestHelper {
this: WSRequestHelper =>
private val itemPrefix = "data: "
private val endOfStreamToken = "[DONE]"
- private implicit val jsonStreamingSupport: JsonEntityStreamingSupport =
- EntityStreamingSupport.json()
+ EntityStreamingSupport.json()
private implicit val jsonMarshaller: Unmarshaller[ByteString, JsValue] =
Unmarshaller.strict[ByteString, JsValue] { byteString =>
val data = byteString.utf8String.stripPrefix(itemPrefix)
- if (data.equals(endOfStreamToken)) JsString(endOfStreamToken) else Json.parse(data)
+ if (data.equals(endOfStreamToken)) JsString(endOfStreamToken)
+ else Json.parse(data)
}
protected def execJsonStreamAux(
- endPoint: PEP,
- method: String,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil,
- bodyParams: Seq[(PT, Option[JsValue])] = Nil)(
- implicit materializer: Materializer
+ endPoint: PEP,
+ method: String,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ bodyParams: Seq[(PT, Option[JsValue])] = Nil
+ )(implicit
+ materializer: Materializer
): Source[JsValue, NotUsed] = {
val source = execStreamRequestAux[JsValue](
endPoint,
@@ -51,8 +56,14 @@ trait WSStreamRequestHelper {
bodyParams,
Framing.delimiter(ByteString("\n\n"), 1000, allowTruncation = true),
{
- case e: JsonParseException => throw new OpenAIScalaClientException(s"$serviceName.$endPoint: 'Response is not a JSON. ${e.getMessage}.")
- case e: FramingException => throw new OpenAIScalaClientException(s"$serviceName.$endPoint: 'Response is not a JSON. ${e.getMessage}.")
+ case e: JsonParseException =>
+ throw new OpenAIScalaClientException(
+ s"$serviceName.$endPoint: 'Response is not a JSON. ${e.getMessage}."
+ )
+ case e: FramingException =>
+ throw new OpenAIScalaClientException(
+ s"$serviceName.$endPoint: 'Response is not a JSON. ${e.getMessage}."
+ )
}
)
@@ -61,32 +72,41 @@ trait WSStreamRequestHelper {
}
protected def execStreamRequestAux[T](
- endPoint: PEP,
- method: String,
- endPointParam: Option[String],
- params: Seq[(PT, Option[Any])],
- bodyParams: Seq[(PT, Option[JsValue])],
- framing: Flow[ByteString, ByteString, NotUsed],
- recoverBlock: PartialFunction[Throwable, T])(
- implicit um: Unmarshaller[ByteString, T], materializer: Materializer
+ endPoint: PEP,
+ method: String,
+ endPointParam: Option[String],
+ params: Seq[(PT, Option[Any])],
+ bodyParams: Seq[(PT, Option[JsValue])],
+ framing: Flow[ByteString, ByteString, NotUsed],
+ recoverBlock: PartialFunction[Throwable, T]
+ )(implicit
+ um: Unmarshaller[ByteString, T],
+ materializer: Materializer
): Source[T, NotUsed] = {
val request = getWSRequestOptional(Some(endPoint), endPointParam, params)
val requestWithBody = if (bodyParams.nonEmpty) {
- val bodyParamsX = bodyParams.collect { case (fieldName, Some(jsValue)) => (fieldName.toString, jsValue) }
+ val bodyParamsX = bodyParams.collect { case (fieldName, Some(jsValue)) =>
+ (fieldName.toString, jsValue)
+ }
request.withBody(JsObject(bodyParamsX))
} else
request
val source =
requestWithBody.withMethod(method).stream().map { response =>
- response
- .bodyAsSource
+ response.bodyAsSource
.via(framing)
- .mapAsync(1)(bytes => Unmarshal(bytes).to[T]) // unmarshal one by one
+ .mapAsync(1)(bytes => Unmarshal(bytes).to[T]) // unmarshal one by one
.recover {
- case e: TimeoutException => throw new OpenAIScalaClientTimeoutException(s"$serviceName.$endPoint timed out: ${e.getMessage}.")
- case e: UnknownHostException => throw new OpenAIScalaClientUnknownHostException(s"$serviceName.$endPoint cannot resolve a host name: ${e.getMessage}.")
+ case e: TimeoutException =>
+ throw new OpenAIScalaClientTimeoutException(
+ s"$serviceName.$endPoint timed out: ${e.getMessage}."
+ )
+ case e: UnknownHostException =>
+ throw new OpenAIScalaClientUnknownHostException(
+ s"$serviceName.$endPoint cannot resolve a host name: ${e.getMessage}."
+ )
}
.recover(recoverBlock) // extra recover
}
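
One observation: with the `jsonStreamingSupport` val removed earlier in this file, the bare `EntityStreamingSupport.json()` expression left behind is a no-op statement. The SSE handling itself splits frames on blank lines, strips the `data: ` prefix, and maps the terminal `[DONE]` token to a `JsString` sentinel; a self-contained sketch of just that parsing step on canned bytes:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Framing, Source}
import akka.util.ByteString
import play.api.libs.json.{JsString, JsValue, Json}

object SseFramingDemo extends App {
  implicit val system: ActorSystem = ActorSystem()

  // Two server-sent events as OpenAI emits them, separated by a blank line.
  val body = ByteString("data: {\"id\":\"c1\",\"choices\":[]}\n\ndata: [DONE]\n\n")

  val itemPrefix = "data: "
  val endOfStreamToken = "[DONE]"

  Source
    .single(body)
    .via(Framing.delimiter(ByteString("\n\n"), 1000, allowTruncation = true))
    .map { bytes =>
      val data = bytes.utf8String.stripPrefix(itemPrefix)
      if (data == endOfStreamToken) JsString(endOfStreamToken) else Json.parse(data)
    }
    .runForeach(println) // prints the parsed chunk, then "[DONE]"
    .onComplete(_ => system.terminate())(system.dispatcher)
}
```
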
diff --git a/openai-client/build.sbt b/openai-client/build.sbt
index 70ac4658..6e7e984e 100644
--- a/openai-client/build.sbt
+++ b/openai-client/build.sbt
@@ -8,7 +8,8 @@ playWsVersion := {
scalaVersion.value match {
case "2.12.18" => "2.1.10"
case "2.13.11" => "2.2.0-M3"
- case "3.2.2" => "2.2.0-M2" // Version "2.2.0-M3" was produced by an unstable release: Scala 3.3.0-RC3
+ case "3.2.2" =>
+ "2.2.0-M2" // Version "2.2.0-M3" was produced by an unstable release: Scala 3.3.0-RC3
case _ => "2.1.10"
}
}
@@ -16,4 +17,4 @@ playWsVersion := {
libraryDependencies ++= Seq(
"com.typesafe.play" %% "play-ahc-ws-standalone" % playWsVersion.value,
"com.typesafe.play" %% "play-ws-standalone-json" % playWsVersion.value
-)
\ No newline at end of file
+)
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/ConfigImplicits.scala b/openai-client/src/main/scala/io/cequence/openaiscala/ConfigImplicits.scala
index d1feb4e1..2be68ba9 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/ConfigImplicits.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/ConfigImplicits.scala
@@ -4,13 +4,15 @@ import com.typesafe.config.Config
object ConfigImplicits {
implicit class ConfigExt(config: Config) {
- def optionalString(configPath: String) =
- if (config.hasPath(configPath)) Some(config.getString(configPath)) else None
+ def optionalString(configPath: String): Option[String] =
+ if (config.hasPath(configPath)) Some(config.getString(configPath))
+ else None
- def optionalInt(configPath: String) =
+ def optionalInt(configPath: String): Option[Int] =
if (config.hasPath(configPath)) Some(config.getInt(configPath)) else None
- def optionalBoolean(configPath: String) =
- if (config.hasPath(configPath)) Some(config.getBoolean(configPath)) else None
+ def optionalBoolean(configPath: String): Option[Boolean] =
+ if (config.hasPath(configPath)) Some(config.getBoolean(configPath))
+ else None
}
}
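
The three accessors share one hasPath-then-get shape. A hypothetical generalization (the names here are illustrative, not the library's):

```scala
import com.typesafe.config.Config

object ConfigOptSketch {
  implicit class ConfigOptExt(val config: Config) extends AnyVal {
    // One helper parameterized by the underlying typed getter.
    private def optional[T](path: String)(get: String => T): Option[T] =
      if (config.hasPath(path)) Some(get(path)) else None

    def optString(path: String): Option[String]   = optional(path)(config.getString)
    def optInt(path: String): Option[Int]         = optional(path)(config.getInt)
    def optBoolean(path: String): Option[Boolean] = optional(path)(config.getBoolean)
  }
}
```
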
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/JsonFormats.scala b/openai-client/src/main/scala/io/cequence/openaiscala/JsonFormats.scala
index 67023bf6..0c8742c4 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/JsonFormats.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/JsonFormats.scala
@@ -1,36 +1,76 @@
package io.cequence.openaiscala
import io.cequence.openaiscala.JsonUtil.JsonOps
-import io.cequence.openaiscala.domain.{ChatRole, FunMessageSpec, FunctionCallSpec, FunctionSpec, MessageSpec}
+import io.cequence.openaiscala.domain.{
+ ChatRole,
+ FunMessageSpec,
+ FunctionCallSpec,
+ FunctionSpec,
+ MessageSpec
+}
-import java.{util => ju}
import io.cequence.openaiscala.domain.response._
import play.api.libs.functional.syntax._
-import play.api.libs.json.{Format, Json, _}
+import play.api.libs.json._
object JsonFormats {
- private implicit val dateFormat: Format[ju.Date] = JsonUtil.SecDateFormat
+ JsonUtil.SecDateFormat
implicit val PermissionFormat: Format[Permission] = Json.format[Permission]
implicit val modelSpecFormat: Format[ModelInfo] = Json.format[ModelInfo]
implicit val usageInfoFormat: Format[UsageInfo] = Json.format[UsageInfo]
- private implicit val stringDoubleMapFormat: Format[Map[String, Double]] = JsonUtil.StringDoubleMapFormat
- private implicit val stringStringMapFormat: Format[Map[String, String]] = JsonUtil.StringStringMapFormat
+ implicit object StringDoubleMapFormat extends Format[Map[String, Double]] {
+ override def reads(json: JsValue): JsResult[Map[String, Double]] = {
+ val resultJsons = json.asSafe[JsObject].fields.map {
+ case (fieldName, jsValue) => (fieldName, jsValue.as[Double])
+ }
+ JsSuccess(resultJsons.toMap)
+ }
- implicit val logprobsInfoFormat: Format[LogprobsInfo] = Json.format[LogprobsInfo]
- implicit val textCompletionChoiceInfoFormat: Format[TextCompletionChoiceInfo] = Json.format[TextCompletionChoiceInfo]
- implicit val textCompletionFormat: Format[TextCompletionResponse] = Json.format[TextCompletionResponse]
+ override def writes(o: Map[String, Double]): JsValue = {
+ val fields = o.map { case (fieldName, value) =>
+ (fieldName, JsNumber(value))
+ }
+ JsObject(fields)
+ }
+ }
+
+ implicit object StringStringMapFormat extends Format[Map[String, String]] {
+ override def reads(json: JsValue): JsResult[Map[String, String]] = {
+ val resultJsons = json.asSafe[JsObject].fields.map {
+ case (fieldName, jsValue) => (fieldName, jsValue.as[String])
+ }
+ JsSuccess(resultJsons.toMap)
+ }
+
+ override def writes(o: Map[String, String]): JsValue = {
+ val fields = o.map { case (fieldName, value) =>
+ (fieldName, JsString(value))
+ }
+ JsObject(fields)
+ }
+ }
+
+// JsonUtil.StringDoubleMapFormat
+// JsonUtil.StringStringMapFormat
+
+ implicit val logprobsInfoFormat: Format[LogprobsInfo] =
+ Json.format[LogprobsInfo]
+ implicit val textCompletionChoiceInfoFormat
+ : Format[TextCompletionChoiceInfo] = Json.format[TextCompletionChoiceInfo]
+ implicit val textCompletionFormat: Format[TextCompletionResponse] =
+ Json.format[TextCompletionResponse]
implicit object ChatRoleFormat extends Format[ChatRole] {
override def reads(json: JsValue): JsResult[ChatRole] = {
json.asSafe[String] match {
- case "user" => JsSuccess(ChatRole.User)
+ case "user" => JsSuccess(ChatRole.User)
case "assistant" => JsSuccess(ChatRole.Assistant)
- case "system" => JsSuccess(ChatRole.System)
- case "function" => JsSuccess(ChatRole.Function)
- case x => JsError(s"$x is not a valid message role.")
+ case "system" => JsSuccess(ChatRole.System)
+ case "function" => JsSuccess(ChatRole.Function)
+ case x => JsError(s"$x is not a valid message role.")
}
}
@@ -39,67 +79,113 @@ object JsonFormats {
}
}
- implicit val functionCallSpecFormat: Format[FunctionCallSpec] = Json.format[FunctionCallSpec]
+ implicit val functionCallSpecFormat: Format[FunctionCallSpec] =
+ Json.format[FunctionCallSpec]
implicit val messageSpecFormat: Format[MessageSpec] = Json.format[MessageSpec]
- implicit val funMessageSpecFormat: Format[FunMessageSpec] = Json.format[FunMessageSpec]
+ implicit val funMessageSpecFormat: Format[FunMessageSpec] =
+ Json.format[FunMessageSpec]
implicit val functionSpecFormat: Format[FunctionSpec] = {
    // used only here, for FunctionSpec
- implicit val stringAnyMapFormat: Format[Map[String, Any]] = JsonUtil.StringAnyMapFormat
+ implicit val stringAnyMapFormat: Format[Map[String, Any]] =
+ JsonUtil.StringAnyMapFormat
Json.format[FunctionSpec]
}
- implicit val chatCompletionChoiceInfoFormat: Format[ChatCompletionChoiceInfo] = Json.format[ChatCompletionChoiceInfo]
- implicit val chatCompletionResponseFormat: Format[ChatCompletionResponse] = Json.format[ChatCompletionResponse]
-
- implicit val chatFunCompletionChoiceInfoFormat: Format[ChatFunCompletionChoiceInfo] = Json.format[ChatFunCompletionChoiceInfo]
- implicit val chatFunCompletionResponseFormat: Format[ChatFunCompletionResponse] = Json.format[ChatFunCompletionResponse]
-
- implicit val chatChunkMessageFormat: Format[ChunkMessageSpec] = Json.format[ChunkMessageSpec]
- implicit val chatCompletionChoiceChunkInfoFormat: Format[ChatCompletionChoiceChunkInfo] = Json.format[ChatCompletionChoiceChunkInfo]
- implicit val chatCompletionChunkResponseFormat: Format[ChatCompletionChunkResponse] = Json.format[ChatCompletionChunkResponse]
-
- implicit val textEditChoiceInfoFormat: Format[TextEditChoiceInfo] = Json.format[TextEditChoiceInfo]
- implicit val textEditFormat: Format[TextEditResponse] = Json.format[TextEditResponse]
+ implicit val chatCompletionChoiceInfoFormat
+ : Format[ChatCompletionChoiceInfo] = Json.format[ChatCompletionChoiceInfo]
+ implicit val chatCompletionResponseFormat: Format[ChatCompletionResponse] =
+ Json.format[ChatCompletionResponse]
+
+ implicit val chatFunCompletionChoiceInfoFormat
+ : Format[ChatFunCompletionChoiceInfo] =
+ Json.format[ChatFunCompletionChoiceInfo]
+ implicit val chatFunCompletionResponseFormat
+ : Format[ChatFunCompletionResponse] =
+ Json.format[ChatFunCompletionResponse]
+
+ implicit val chatChunkMessageFormat: Format[ChunkMessageSpec] =
+ Json.format[ChunkMessageSpec]
+ implicit val chatCompletionChoiceChunkInfoFormat
+ : Format[ChatCompletionChoiceChunkInfo] =
+ Json.format[ChatCompletionChoiceChunkInfo]
+ implicit val chatCompletionChunkResponseFormat
+ : Format[ChatCompletionChunkResponse] =
+ Json.format[ChatCompletionChunkResponse]
+
+ implicit val textEditChoiceInfoFormat: Format[TextEditChoiceInfo] =
+ Json.format[TextEditChoiceInfo]
+ implicit val textEditFormat: Format[TextEditResponse] =
+ Json.format[TextEditResponse]
implicit val imageFormat: Format[ImageInfo] = Json.format[ImageInfo]
- implicit val embeddingInfoFormat: Format[EmbeddingInfo] = Json.format[EmbeddingInfo]
- implicit val embeddingUsageInfoFormat: Format[EmbeddingUsageInfo] = Json.format[EmbeddingUsageInfo]
- implicit val embeddingFormat: Format[EmbeddingResponse] = Json.format[EmbeddingResponse]
+ implicit val embeddingInfoFormat: Format[EmbeddingInfo] =
+ Json.format[EmbeddingInfo]
+ implicit val embeddingUsageInfoFormat: Format[EmbeddingUsageInfo] =
+ Json.format[EmbeddingUsageInfo]
+ implicit val embeddingFormat: Format[EmbeddingResponse] =
+ Json.format[EmbeddingResponse]
implicit val fileInfoFormat: Format[FileInfo] = Json.format[FileInfo]
- implicit val fineTuneEventFormat: Format[FineTuneEvent] = Json.format[FineTuneEvent]
- implicit val fineTuneHyperparamsFormat: Format[FineTuneHyperparams] = Json.format[FineTuneHyperparams]
+ implicit val fineTuneEventFormat: Format[FineTuneEvent] =
+ Json.format[FineTuneEvent]
+ implicit val fineTuneHyperparamsFormat: Format[FineTuneHyperparams] =
+ Json.format[FineTuneHyperparams]
implicit val fineTuneFormat: Format[FineTuneJob] = Json.format[FineTuneJob]
// somehow ModerationCategories.unapply is not working in Scala3
implicit val moderationCategoriesFormat: Format[ModerationCategories] = (
(__ \ "hate").format[Boolean] and
- (__ \ "hate/threatening").format[Boolean] and
- (__ \ "self-harm").format[Boolean] and
- (__ \ "sexual").format[Boolean] and
- (__ \ "sexual/minors").format[Boolean] and
- (__ \ "violence").format[Boolean] and
- (__ \ "violence/graphic").format[Boolean]
- )(ModerationCategories.apply, { (x: ModerationCategories) =>
- (x.hate, x.hate_threatening, x.self_harm, x.sexual, x.sexual_minors, x.violence, x.violence_graphic)
- })
+ (__ \ "hate/threatening").format[Boolean] and
+ (__ \ "self-harm").format[Boolean] and
+ (__ \ "sexual").format[Boolean] and
+ (__ \ "sexual/minors").format[Boolean] and
+ (__ \ "violence").format[Boolean] and
+ (__ \ "violence/graphic").format[Boolean]
+ )(
+ ModerationCategories.apply,
+ { (x: ModerationCategories) =>
+ (
+ x.hate,
+ x.hate_threatening,
+ x.self_harm,
+ x.sexual,
+ x.sexual_minors,
+ x.violence,
+ x.violence_graphic
+ )
+ }
+ )
// somehow ModerationCategoryScores.unapply is not working in Scala3
- implicit val moderationCategoryScoresFormat: Format[ModerationCategoryScores] = (
+ implicit val moderationCategoryScoresFormat
+ : Format[ModerationCategoryScores] = (
(__ \ "hate").format[Double] and
- (__ \ "hate/threatening").format[Double] and
- (__ \ "self-harm").format[Double] and
- (__ \ "sexual").format[Double] and
- (__ \ "sexual/minors").format[Double] and
- (__ \ "violence").format[Double] and
- (__ \ "violence/graphic").format[Double]
- )(ModerationCategoryScores.apply, { (x: ModerationCategoryScores) =>
- (x.hate, x.hate_threatening, x.self_harm, x.sexual, x.sexual_minors, x.violence, x.violence_graphic)
- })
-
- implicit val moderationResultFormat: Format[ModerationResult] = Json.format[ModerationResult]
- implicit val moderationFormat: Format[ModerationResponse] = Json.format[ModerationResponse]
+ (__ \ "hate/threatening").format[Double] and
+ (__ \ "self-harm").format[Double] and
+ (__ \ "sexual").format[Double] and
+ (__ \ "sexual/minors").format[Double] and
+ (__ \ "violence").format[Double] and
+ (__ \ "violence/graphic").format[Double]
+ )(
+ ModerationCategoryScores.apply,
+ { (x: ModerationCategoryScores) =>
+ (
+ x.hate,
+ x.hate_threatening,
+ x.self_harm,
+ x.sexual,
+ x.sexual_minors,
+ x.violence,
+ x.violence_graphic
+ )
+ }
+ )
+
+ implicit val moderationResultFormat: Format[ModerationResult] =
+ Json.format[ModerationResult]
+ implicit val moderationFormat: Format[ModerationResponse] =
+ Json.format[ModerationResponse]
}
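
`StringDoubleMapFormat` and `StringStringMapFormat` differ only in the value type (the bare `JsonUtil.SecDateFormat` expression and the commented-out references above read as leftovers of the move). Where the value type has play-json `Reads`/`Writes`, one generic `Format` covers both; a sketch (the `Map[String, Any]` format in JsonUtil still needs its own treatment, since `Any` has no `Reads`):

```scala
import play.api.libs.json._

object MapFormatSketch {
  // Hypothetical generic replacement for the two hand-written map formats.
  def mapFormat[V: Reads: Writes]: Format[Map[String, V]] =
    Format(
      Reads(json =>
        json
          .validate[JsObject]
          .map(_.fields.map { case (k, v) => k -> v.as[V] }.toMap)
      ),
      Writes(m => JsObject(m.map { case (k, v) => k -> Json.toJson(v) }))
    )

  implicit val doubleMap: Format[Map[String, Double]] = mapFormat[Double]
  implicit val stringMap: Format[Map[String, String]] = mapFormat[String]
}
```
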
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/JsonUtil.scala b/openai-client/src/main/scala/io/cequence/openaiscala/JsonUtil.scala
index 665823af..c6ec9286 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/JsonUtil.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/JsonUtil.scala
@@ -1,31 +1,72 @@
package io.cequence.openaiscala
-import java.{util => ju}
import play.api.libs.json._
import java.util.Date
+import java.{util => ju}
object JsonUtil {
implicit class JsonOps(val json: JsValue) {
- def asSafe[T](implicit fjs: Reads[T]) =
+ def asSafeArray[T](implicit fjs: Reads[T]): Seq[T] =
+ json
+ .asSafe[JsArray]
+ .value
+ .toSeq
+ .map(
+ _.asSafe[T]
+ )
+
+ def asSafe[T](implicit fjs: Reads[T]): T =
try {
json.validate[T] match {
- case JsSuccess(value, path) => value
+ case JsSuccess(value, _) => value
case JsError(errors) =>
- val errorString = errors.map { case (path, pathErrors) => s"JSON at path '${path}' contains the following errors: ${pathErrors.map(_.message).mkString(";")}" }.mkString("\n")
- throw new OpenAIScalaClientException(s"Unexpected JSON:\n'${Json.prettyPrint(json)}'. Cannot be parsed due to: $errorString")
+ val errorString = errors
+ .map { case (path, pathErrors) =>
+ s"JSON at path '${path}' contains the following errors: ${pathErrors.map(_.message).mkString(";")}"
+ }
+ .mkString("\n")
+ throw new OpenAIScalaClientException(
+ s"Unexpected JSON:\n'${Json.prettyPrint(json)}'. Cannot be parsed due to: $errorString"
+ )
}
} catch {
- case e: Exception => throw new OpenAIScalaClientException(s"Error thrown while processing a JSON '$json'. Cause: ${e.getMessage}")
+ case e: Exception =>
+ throw new OpenAIScalaClientException(
+ s"Error thrown while processing a JSON '$json'. Cause: ${e.getMessage}"
+ )
}
-
- def asSafeArray[T](implicit fjs: Reads[T]): Seq[T] =
- json.asSafe[JsArray].value.toSeq.map(
- _.asSafe[T]
- )
}
+ def toJson(value: Any): JsValue =
+ if (value == null)
+ JsNull
+ else
+ value match {
+ case x: JsValue => x // nothing to do
+ case x: String => JsString(x)
+ case x: BigDecimal => JsNumber(x)
+ case x: Integer => JsNumber(BigDecimal.valueOf(x.toLong))
+ case x: Long => JsNumber(BigDecimal.valueOf(x))
+ case x: Double => JsNumber(BigDecimal.valueOf(x))
+ case x: Float => JsNumber(BigDecimal.valueOf(x.toDouble))
+ case x: Boolean => JsBoolean(x)
+ case x: ju.Date => Json.toJson(x)
+ case x: Option[_] => x.map(toJson).getOrElse(JsNull)
+ case x: Array[_] => JsArray(x.map(toJson))
+ case x: Seq[_] => JsArray(x.map(toJson))
+ case x: Map[String, _] =>
+ val jsonValues = x.map { case (fieldName, value) =>
+ (fieldName, toJson(value))
+ }
+ JsObject(jsonValues)
+ case _ =>
+ throw new IllegalArgumentException(
+ s"No JSON formatter found for the class ${value.getClass.getName}."
+ )
+ }
+
object SecDateFormat extends Format[ju.Date] {
override def reads(json: JsValue): JsResult[Date] = {
json match {
@@ -49,62 +90,19 @@ object JsonUtil {
JsNumber(Math.round(o.getTime.toDouble / 1000))
}
- def toJson(value: Any): JsValue =
- if (value == null)
- JsNull
- else
- value match {
- case x: JsValue => x // nothing to do
- case x: String => JsString(x)
- case x: BigDecimal => JsNumber(x)
- case x: Integer => JsNumber(BigDecimal.valueOf(x.toLong))
- case x: Long => JsNumber(BigDecimal.valueOf(x))
- case x: Double => JsNumber(BigDecimal.valueOf(x))
- case x: Float => JsNumber(BigDecimal.valueOf(x.toDouble))
- case x: Boolean => JsBoolean(x)
- case x: ju.Date => Json.toJson(x)
- case x: Option[_] => x.map(toJson).getOrElse(JsNull)
- case x: Array[_] => JsArray(x.map(toJson))
- case x: Seq[_] => JsArray(x.map(toJson))
- case x: Map[String, _] =>
- val jsonValues = x.map { case (fieldName, value) => (fieldName, toJson(value)) }
- JsObject(jsonValues)
- case _ => throw new IllegalArgumentException(s"No JSON formatter found for the class ${value.getClass.getName}.")
- }
-
- object StringDoubleMapFormat extends Format[Map[String, Double]] {
- override def reads(json: JsValue): JsResult[Map[String, Double]] = {
- val resultJsons = json.asSafe[JsObject].fields.map { case (fieldName, jsValue) => (fieldName, jsValue.as[Double]) }
- JsSuccess(resultJsons.toMap)
- }
-
- override def writes(o: Map[String, Double]): JsValue = {
- val fields = o.map { case (fieldName, value) => (fieldName, JsNumber(value)) }
- JsObject(fields)
- }
- }
-
- object StringStringMapFormat extends Format[Map[String, String]] {
- override def reads(json: JsValue): JsResult[Map[String, String]] = {
- val resultJsons = json.asSafe[JsObject].fields.map { case (fieldName, jsValue) => (fieldName, jsValue.as[String]) }
- JsSuccess(resultJsons.toMap)
- }
-
- override def writes(o: Map[String, String]): JsValue = {
- val fields = o.map { case (fieldName, value) => (fieldName, JsString(value)) }
- JsObject(fields)
- }
- }
-
object StringAnyMapFormat extends Format[Map[String, Any]] {
override def reads(json: JsValue): JsResult[Map[String, Any]] = {
- val resultJsons = json.asSafe[JsObject].fields.map { case (fieldName, jsValue) => (fieldName, jsValue.toString) }
+ val resultJsons = json.asSafe[JsObject].fields.map {
+ case (fieldName, jsValue) => (fieldName, jsValue.toString)
+ }
JsSuccess(resultJsons.toMap)
}
override def writes(o: Map[String, Any]): JsValue = {
- val fields = o.map { case (fieldName, value) => (fieldName, toJson(value)) }
+ val fields = o.map { case (fieldName, value) =>
+ (fieldName, toJson(value))
+ }
JsObject(fields)
}
}
-}
\ No newline at end of file
+}
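
`toJson(value: Any)` converts untyped values recursively and is what `StringAnyMapFormat.writes` relies on. A short usage sketch; the parameter map mirrors the kind of schema passed to `FunctionSpec` and is purely illustrative:

```scala
import io.cequence.openaiscala.JsonUtil
import play.api.libs.json.Json

object ToJsonDemo extends App {
  val params: Map[String, Any] = Map(
    "type" -> "object",
    "properties" -> Map(
      "location" -> Map("type" -> "string"),
      "unit" -> Map("type" -> "string", "enum" -> Seq("celsius", "fahrenheit"))
    ),
    "required" -> Seq("location")
  )

  // Strings, Seqs and nested Maps are handled by the matches in toJson above.
  println(Json.prettyPrint(JsonUtil.toJson(params)))
}
```
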
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/service/EndPoint.scala b/openai-client/src/main/scala/io/cequence/openaiscala/service/EndPoint.scala
index 4bc4888c..50659de1 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/service/EndPoint.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/service/EndPoint.scala
@@ -61,4 +61,4 @@ object Param {
case object language extends Param
case object functions extends Param
case object function_call extends Param
-}
\ No newline at end of file
+}
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceFactoryHelper.scala b/openai-client/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceFactoryHelper.scala
index 391ceb7e..9e0fc9d0 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceFactoryHelper.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceFactoryHelper.scala
@@ -10,23 +10,28 @@ import scala.concurrent.ExecutionContext
trait OpenAIServiceFactoryHelper[F] extends OpenAIServiceConsts {
def apply(
- apiKey: String,
- orgId: Option[String] = None,
- timeouts: Option[Timeouts] = None)(
- implicit ec: ExecutionContext, materializer: Materializer
+ apiKey: String,
+ orgId: Option[String] = None,
+ timeouts: Option[Timeouts] = None
+ )(implicit
+ ec: ExecutionContext,
+ materializer: Materializer
): F
- def apply()(
- implicit ec: ExecutionContext, materializer: Materializer
+ def apply()(implicit
+ ec: ExecutionContext,
+ materializer: Materializer
): F =
apply(ConfigFactory.load(configFileName))
- def apply(
- config: Config)(
- implicit ec: ExecutionContext, materializer: Materializer
+ def apply(config: Config)(implicit
+ ec: ExecutionContext,
+ materializer: Materializer
): F = {
def intTimeoutAux(fieldName: String) =
- config.optionalInt(s"$configPrefix.timeouts.${fieldName}Sec").map(_ * 1000)
+ config
+ .optionalInt(s"$configPrefix.timeouts.${fieldName}Sec")
+ .map(_ * 1000)
val timeouts = Timeouts(
requestTimeout = intTimeoutAux("requestTimeout"),
@@ -39,7 +44,8 @@ trait OpenAIServiceFactoryHelper[F] extends OpenAIServiceConsts {
apiKey = config.getString(s"$configPrefix.apiKey"),
orgId = config.optionalString(s"$configPrefix.orgId"),
timeouts =
- if (timeouts.requestTimeout.isDefined
+ if (
+ timeouts.requestTimeout.isDefined
|| timeouts.readTimeout.isDefined
|| timeouts.connectTimeout.isDefined
|| timeouts.pooledConnectionIdleTimeout.isDefined
@@ -49,4 +55,4 @@ trait OpenAIServiceFactoryHelper[F] extends OpenAIServiceConsts {
None
)
}
-}
\ No newline at end of file
+}
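
The config-driven `apply` reads `apiKey`, an optional `orgId`, and per-field `timeouts.*Sec` values, converting seconds to milliseconds. A sketch of a matching config and call; the `openai-scala-client` prefix is an assumption based on the library's documented defaults:

```scala
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.typesafe.config.ConfigFactory
import io.cequence.openaiscala.service.OpenAIServiceFactory
import scala.concurrent.ExecutionContext

object FactoryFromConfigDemo extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val materializer: Materializer = Materializer(system)

  val config = ConfigFactory.parseString(
    """openai-scala-client {
      |  apiKey = "my-api-key"       # placeholder
      |  timeouts {
      |    requestTimeoutSec = 200   # becomes Timeouts(requestTimeout = Some(200000))
      |    readTimeoutSec    = 120
      |  }
      |}""".stripMargin
  )

  val service = OpenAIServiceFactory(config)
}
```
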
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceImpl.scala b/openai-client/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceImpl.scala
index e880efaf..c1da3ce3 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceImpl.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceImpl.scala
@@ -8,29 +8,36 @@ import io.cequence.openaiscala.JsonFormats._
import io.cequence.openaiscala.OpenAIScalaClientException
import io.cequence.openaiscala.domain.settings._
import io.cequence.openaiscala.domain.response._
-import io.cequence.openaiscala.ConfigImplicits._
-import io.cequence.openaiscala.domain.{BaseMessageSpec, FunMessageSpec, FunctionSpec, MessageSpec}
+import io.cequence.openaiscala.domain.{
+ BaseMessageSpec,
+ FunMessageSpec,
+ FunctionSpec,
+ MessageSpec
+}
import io.cequence.openaiscala.service.ws.{Timeouts, WSRequestHelper}
import java.io.File
import scala.concurrent.{ExecutionContext, Future}
-/**
- * Private impl. class of [[OpenAIService]].
- *
- * @param apiKey
- * @param orgId
- * @param ec
- * @param materializer
- *
- * @since Jan 2023
- */
+/** Private impl. class of [[OpenAIService]].
+ *
+ * @param apiKey
+ * @param orgId
+ * @param ec
+ * @param materializer
+ *
+ * @since Jan
+ * 2023
+ */
private class OpenAIServiceImpl(
- apiKey: String,
- orgId: Option[String] = None,
- explTimeouts: Option[Timeouts] = None)(
- implicit val ec: ExecutionContext, val materializer: Materializer
-) extends OpenAIService with WSRequestHelper {
+ apiKey: String,
+ orgId: Option[String] = None,
+ explTimeouts: Option[Timeouts] = None
+)(implicit
+ val ec: ExecutionContext,
+ val materializer: Materializer
+) extends OpenAIService
+ with WSRequestHelper {
override protected type PEP = EndPoint
override protected type PT = Param
@@ -45,15 +52,19 @@ private class OpenAIServiceImpl(
override def listModels: Future[Seq[ModelInfo]] =
execGET(EndPoint.models).map { response =>
- (response.asSafe[JsObject] \ "data").toOption.map {
- _.asSafeArray[ModelInfo]
- }.getOrElse(
- throw new OpenAIScalaClientException(s"The attribute 'data' is not present in the response: ${response.toString()}.")
- )
+ (response.asSafe[JsObject] \ "data").toOption
+ .map {
+ _.asSafeArray[ModelInfo]
+ }
+ .getOrElse(
+ throw new OpenAIScalaClientException(
+ s"The attribute 'data' is not present in the response: ${response.toString()}."
+ )
+ )
}
override def retrieveModel(
- modelId: String
+ modelId: String
): Future[Option[ModelInfo]] =
execGETWithStatus(
EndPoint.models,
@@ -63,21 +74,22 @@ private class OpenAIServiceImpl(
}
override def createCompletion(
- prompt: String,
- settings: CreateCompletionSettings
+ prompt: String,
+ settings: CreateCompletionSettings
): Future[TextCompletionResponse] =
execPOST(
EndPoint.completions,
- bodyParams = createBodyParamsForCompletion(prompt, settings, stream = false)
+ bodyParams =
+ createBodyParamsForCompletion(prompt, settings, stream = false)
).map(
_.asSafe[TextCompletionResponse]
)
protected def createBodyParamsForCompletion(
- prompt: String,
- settings: CreateCompletionSettings,
- stream: Boolean
- ) =
+ prompt: String,
+ settings: CreateCompletionSettings,
+ stream: Boolean
+ ): Seq[(Param, Option[JsValue])] =
jsonBodyParams(
Param.prompt -> Some(prompt),
Param.model -> Some(settings.model),
@@ -106,27 +118,31 @@ private class OpenAIServiceImpl(
)
override def createChatCompletion(
- messages: Seq[MessageSpec],
- settings: CreateChatCompletionSettings
+ messages: Seq[MessageSpec],
+ settings: CreateChatCompletionSettings
): Future[ChatCompletionResponse] =
execPOST(
EndPoint.chat_completions,
- bodyParams = createBodyParamsForChatCompletion(messages, settings, stream = false)
+ bodyParams =
+ createBodyParamsForChatCompletion(messages, settings, stream = false)
).map(
_.asSafe[ChatCompletionResponse]
)
override def createChatFunCompletion(
- messages: Seq[FunMessageSpec],
- functions: Seq[FunctionSpec],
- responseFunctionName: Option[String],
- settings: CreateChatCompletionSettings
+ messages: Seq[FunMessageSpec],
+ functions: Seq[FunctionSpec],
+ responseFunctionName: Option[String],
+ settings: CreateChatCompletionSettings
): Future[ChatFunCompletionResponse] = {
- val coreParams = createBodyParamsForChatCompletion(messages, settings, stream = false)
+ val coreParams =
+ createBodyParamsForChatCompletion(messages, settings, stream = false)
val extraParams = jsonBodyParams(
Param.functions -> Some(Json.toJson(functions)),
- Param.function_call -> responseFunctionName.map(name => Map("name" -> name)), // otherwise "auto" is used by default
+ Param.function_call -> responseFunctionName.map(name =>
+ Map("name" -> name)
+ ) // otherwise "auto" is used by default
)
execPOST(
@@ -138,10 +154,10 @@ private class OpenAIServiceImpl(
}
protected def createBodyParamsForChatCompletion(
- messages: Seq[BaseMessageSpec],
- settings: CreateChatCompletionSettings,
- stream: Boolean
- ) = {
+ messages: Seq[BaseMessageSpec],
+ settings: CreateChatCompletionSettings,
+ stream: Boolean
+ ): Seq[(Param, Option[JsValue])] = {
assert(messages.nonEmpty, "At least one message expected.")
val messageJsons = messages.map(_ match {
case m: MessageSpec =>
@@ -149,9 +165,11 @@ private class OpenAIServiceImpl(
case m: FunMessageSpec =>
val json = Json.toJson(m)(funMessageSpecFormat)
// if the content is empty, add a null value (expected by the API)
- m.content.map(_ => json).getOrElse(
- json.as[JsObject].+("content" -> JsNull)
- )
+ m.content
+ .map(_ => json)
+ .getOrElse(
+ json.as[JsObject].+("content" -> JsNull)
+ )
})
jsonBodyParams(
@@ -179,9 +197,9 @@ private class OpenAIServiceImpl(
}
override def createEdit(
- input: String,
- instruction: String,
- settings: CreateEditSettings
+ input: String,
+ instruction: String,
+ settings: CreateEditSettings
): Future[TextEditResponse] =
execPOST(
EndPoint.edits,
@@ -198,8 +216,8 @@ private class OpenAIServiceImpl(
)
override def createImage(
- prompt: String,
- settings: CreateImageSettings
+ prompt: String,
+ settings: CreateImageSettings
): Future[ImageInfo] =
execPOST(
EndPoint.images_generations,
@@ -215,14 +233,15 @@ private class OpenAIServiceImpl(
)
override def createImageEdit(
- prompt: String,
- image: File,
- mask: Option[File] = None,
- settings: CreateImageSettings
+ prompt: String,
+ image: File,
+ mask: Option[File] = None,
+ settings: CreateImageSettings
): Future[ImageInfo] =
execPOSTMultipart(
EndPoint.images_edits,
- fileParams = Seq((Param.image, image, None)) ++ mask.map((Param.mask, _, None)),
+ fileParams =
+ Seq((Param.image, image, None)) ++ mask.map((Param.mask, _, None)),
bodyParams = Seq(
Param.prompt -> Some(prompt),
Param.n -> settings.n,
@@ -235,8 +254,8 @@ private class OpenAIServiceImpl(
)
override def createImageVariation(
- image: File,
- settings: CreateImageSettings
+ image: File,
+ settings: CreateImageSettings
): Future[ImageInfo] =
execPOSTMultipart(
EndPoint.images_variations,
@@ -252,8 +271,8 @@ private class OpenAIServiceImpl(
)
override def createEmbeddings(
- input: Seq[String],
- settings: CreateEmbeddingsSettings
+ input: Seq[String],
+ settings: CreateEmbeddingsSettings
): Future[EmbeddingResponse] =
execPOST(
EndPoint.embeddings,
@@ -273,9 +292,9 @@ private class OpenAIServiceImpl(
)
override def createAudioTranscription(
- file: File,
- prompt: Option[String],
- settings: CreateTranscriptionSettings
+ file: File,
+ prompt: Option[String],
+ settings: CreateTranscriptionSettings
): Future[TranscriptResponse] =
execPOSTMultipartWithStatusString(
EndPoint.audio_transcriptions,
@@ -290,9 +309,9 @@ private class OpenAIServiceImpl(
).map(processAudioTranscriptResponse(settings.response_format))
override def createAudioTranslation(
- file: File,
- prompt: Option[String],
- settings: CreateTranslationSettings
+ file: File,
+ prompt: Option[String],
+ settings: CreateTranslationSettings
): Future[TranscriptResponse] =
execPOSTMultipartWithStatusString(
EndPoint.audio_translations,
@@ -306,17 +325,22 @@ private class OpenAIServiceImpl(
).map(processAudioTranscriptResponse(settings.response_format))
private def processAudioTranscriptResponse(
- responseFormat: Option[TranscriptResponseFormatType])(
- stringRichResponse: RichStringResponse
+ responseFormat: Option[TranscriptResponseFormatType]
+ )(
+ stringRichResponse: RichStringResponse
) = {
val stringResponse = handleErrorResponse(stringRichResponse)
def textFromJsonString(json: JsValue) =
- (json.asSafe[JsObject] \ "text").toOption.map {
- _.asSafe[String]
- }.getOrElse(
- throw new OpenAIScalaClientException(s"The attribute 'text' is not present in the response: ${stringResponse}.")
- )
+ (json.asSafe[JsObject] \ "text").toOption
+ .map {
+ _.asSafe[String]
+ }
+ .getOrElse(
+ throw new OpenAIScalaClientException(
+ s"The attribute 'text' is not present in the response: ${stringResponse}."
+ )
+ )
val FormatType = TranscriptResponseFormatType
@@ -339,17 +363,21 @@ private class OpenAIServiceImpl(
override def listFiles: Future[Seq[FileInfo]] =
execGET(EndPoint.files).map { response =>
- (response.asSafe[JsObject] \ "data").toOption.map {
- _.asSafeArray[FileInfo]
- }.getOrElse(
- throw new OpenAIScalaClientException(s"The attribute 'data' is not present in the response: ${response.toString()}.")
- )
+ (response.asSafe[JsObject] \ "data").toOption
+ .map {
+ _.asSafeArray[FileInfo]
+ }
+ .getOrElse(
+ throw new OpenAIScalaClientException(
+ s"The attribute 'data' is not present in the response: ${response.toString()}."
+ )
+ )
}
override def uploadFile(
- file: File,
- displayFileName: Option[String],
- settings: UploadFileSettings
+ file: File,
+ displayFileName: Option[String],
+ settings: UploadFileSettings
): Future[FileInfo] =
execPOSTMultipart(
EndPoint.files,
@@ -362,29 +390,35 @@ private class OpenAIServiceImpl(
)
override def deleteFile(
- fileId: String
+ fileId: String
): Future[DeleteResponse] =
execDELETEWithStatus(
EndPoint.files,
endPointParam = Some(fileId)
- ).map( response =>
- handleNotFoundAndError(response).map(jsResponse =>
- (jsResponse \ "deleted").toOption.map {
- _.asSafe[Boolean] match {
- case true => DeleteResponse.Deleted
- case false => DeleteResponse.NotDeleted
- }
- }.getOrElse(
- throw new OpenAIScalaClientException(s"The attribute 'deleted' is not present in the response: ${response.toString()}.")
+ ).map(response =>
+ handleNotFoundAndError(response)
+ .map(jsResponse =>
+ (jsResponse \ "deleted").toOption
+ .map {
+ _.asSafe[Boolean] match {
+ case true => DeleteResponse.Deleted
+ case false => DeleteResponse.NotDeleted
+ }
+ }
+ .getOrElse(
+ throw new OpenAIScalaClientException(
+ s"The attribute 'deleted' is not present in the response: ${response.toString()}."
+ )
+ )
+ )
+ .getOrElse(
+ // we got a not-found http code (404)
+ DeleteResponse.NotFound
)
- ).getOrElse(
- // we got a not-found http code (404)
- DeleteResponse.NotFound
- )
)
override def retrieveFile(
- fileId: String
+ fileId: String
): Future[Option[FileInfo]] =
execGETWithStatus(
EndPoint.files,
@@ -395,7 +429,7 @@ private class OpenAIServiceImpl(
  // because the output type here is string, we need to do a bit of manual request building and calling
override def retrieveFileContent(
- fileId: String
+ fileId: String
): Future[Option[String]] = {
val endPoint = EndPoint.files
val endPointParam = Some(s"${fileId}/content")
@@ -408,9 +442,9 @@ private class OpenAIServiceImpl(
}
override def createFineTune(
- training_file: String,
- validation_file: Option[String] = None,
- settings: CreateFineTuneSettings
+ training_file: String,
+ validation_file: Option[String] = None,
+ settings: CreateFineTuneSettings
): Future[FineTuneJob] =
execPOST(
EndPoint.fine_tunes,
@@ -434,15 +468,19 @@ private class OpenAIServiceImpl(
override def listFineTunes: Future[Seq[FineTuneJob]] =
execGET(EndPoint.fine_tunes).map { response =>
- (response.asSafe[JsObject] \ "data").toOption.map {
- _.asSafeArray[FineTuneJob]
- }.getOrElse(
- throw new OpenAIScalaClientException(s"The attribute 'data' is not present in the response: ${response.toString()}.")
- )
+ (response.asSafe[JsObject] \ "data").toOption
+ .map {
+ _.asSafeArray[FineTuneJob]
+ }
+ .getOrElse(
+ throw new OpenAIScalaClientException(
+ s"The attribute 'data' is not present in the response: ${response.toString()}."
+ )
+ )
}
override def retrieveFineTune(
- fineTuneId: String
+ fineTuneId: String
): Future[Option[FineTuneJob]] =
execGETWithStatus(
EndPoint.fine_tunes,
@@ -452,7 +490,7 @@ private class OpenAIServiceImpl(
)
override def cancelFineTune(
- fineTuneId: String
+ fineTuneId: String
): Future[Option[FineTuneJob]] =
execPOSTWithStatus(
EndPoint.fine_tunes,
@@ -462,7 +500,7 @@ private class OpenAIServiceImpl(
)
override def listFineTuneEvents(
- fineTuneId: String
+ fineTuneId: String
): Future[Option[Seq[FineTuneEvent]]] =
execGETWithStatus(
EndPoint.fine_tunes,
@@ -472,39 +510,49 @@ private class OpenAIServiceImpl(
)
).map { response =>
handleNotFoundAndError(response).map(jsResponse =>
- (jsResponse.asSafe[JsObject] \ "data").toOption.map {
- _.asSafeArray[FineTuneEvent]
- }.getOrElse(
- throw new OpenAIScalaClientException(s"The attribute 'data' is not present in the response: ${response.toString()}.")
- )
+ (jsResponse.asSafe[JsObject] \ "data").toOption
+ .map {
+ _.asSafeArray[FineTuneEvent]
+ }
+ .getOrElse(
+ throw new OpenAIScalaClientException(
+ s"The attribute 'data' is not present in the response: ${response.toString()}."
+ )
+ )
)
}
override def deleteFineTuneModel(
- modelId: String
+ modelId: String
): Future[DeleteResponse] =
execDELETEWithStatus(
EndPoint.models,
endPointParam = Some(modelId)
- ).map( response =>
- handleNotFoundAndError(response).map(jsResponse =>
- (jsResponse \ "deleted").toOption.map {
- _.asSafe[Boolean] match {
- case true => DeleteResponse.Deleted
- case false => DeleteResponse.NotDeleted
- }
- }.getOrElse(
- throw new OpenAIScalaClientException(s"The attribute 'deleted' is not present in the response: ${response.toString()}.")
+ ).map(response =>
+ handleNotFoundAndError(response)
+ .map(jsResponse =>
+ (jsResponse \ "deleted").toOption
+ .map {
+ _.asSafe[Boolean] match {
+ case true => DeleteResponse.Deleted
+ case false => DeleteResponse.NotDeleted
+ }
+ }
+ .getOrElse(
+ throw new OpenAIScalaClientException(
+ s"The attribute 'deleted' is not present in the response: ${response.toString()}."
+ )
+ )
+ )
+ .getOrElse(
+ // we got a not-found http code (404)
+ DeleteResponse.NotFound
)
- ).getOrElse(
- // we got a not-found http code (404)
- DeleteResponse.NotFound
- )
)
override def createModeration(
- input: String,
- settings: CreateModerationSettings
+ input: String,
+ settings: CreateModerationSettings
): Future[ModerationResponse] =
execPOST(
EndPoint.moderations,
@@ -519,34 +567,36 @@ private class OpenAIServiceImpl(
// aux
override protected def getWSRequestOptional(
- endPoint: Option[PEP],
- endPointParam: Option[String],
- params: Seq[(PT, Option[Any])] = Nil
- ) =
+ endPoint: Option[PEP],
+ endPointParam: Option[String],
+ params: Seq[(PT, Option[Any])] = Nil
+ ): StandaloneWSRequest#Self =
addHeaders(super.getWSRequestOptional(endPoint, endPointParam, params))
override protected def getWSRequest(
- endPoint: Option[PEP],
- endPointParam: Option[String],
- params: Seq[(PT, Any)] = Nil
- ) =
+ endPoint: Option[PEP],
+ endPointParam: Option[String],
+ params: Seq[(PT, Any)] = Nil
+ ): StandaloneWSRequest#Self =
addHeaders(super.getWSRequest(endPoint, endPointParam, params))
private def addHeaders(request: StandaloneWSRequest) = {
val orgIdHeader = orgId.map(("OpenAI-Organization", _))
val headers = orgIdHeader ++: Seq(("Authorization", s"Bearer $apiKey"))
- request.addHttpHeaders(headers :_*)
+ request.addHttpHeaders(headers: _*)
}
}
object OpenAIServiceFactory extends OpenAIServiceFactoryHelper[OpenAIService] {
override def apply(
- apiKey: String,
- orgId: Option[String] = None,
- timeouts: Option[Timeouts] = None)(
- implicit ec: ExecutionContext, materializer: Materializer
+ apiKey: String,
+ orgId: Option[String] = None,
+ timeouts: Option[Timeouts] = None
+ )(implicit
+ ec: ExecutionContext,
+ materializer: Materializer
): OpenAIService =
new OpenAIServiceImpl(apiKey, orgId, timeouts)
-}
\ No newline at end of file
+}
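
`deleteFile` and `deleteFineTuneModel` duplicate the same nested parse of the `deleted` flag with a 404 fallback. A hypothetical helper that would factor it out; `jsResponseOpt` stands in for the `Option[JsValue]` returned by `handleNotFoundAndError`:

```scala
import io.cequence.openaiscala.JsonUtil.JsonOps
import io.cequence.openaiscala.OpenAIScalaClientException
import io.cequence.openaiscala.domain.response.DeleteResponse
import play.api.libs.json.JsValue

object DeleteResponseParsing {
  // None means the endpoint answered with a not-found http code (404).
  def toDeleteResponse(jsResponseOpt: Option[JsValue]): DeleteResponse =
    jsResponseOpt
      .map { jsResponse =>
        (jsResponse \ "deleted").toOption
          .map { deleted =>
            if (deleted.asSafe[Boolean]) DeleteResponse.Deleted
            else DeleteResponse.NotDeleted
          }
          .getOrElse(
            throw new OpenAIScalaClientException(
              s"The attribute 'deleted' is not present in the response: $jsResponse."
            )
          )
      }
      .getOrElse(DeleteResponse.NotFound)
}
```
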
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/MultipartFormData.scala b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/MultipartFormData.scala
index e0d2979a..81936cc2 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/MultipartFormData.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/MultipartFormData.scala
@@ -1,13 +1,13 @@
package io.cequence.openaiscala.service.ws
case class MultipartFormData(
- dataParts: Map[String, Seq[String]] = Map(),
- files: Seq[FilePart] = Nil
+ dataParts: Map[String, Seq[String]] = Map(),
+ files: Seq[FilePart] = Nil
)
case class FilePart(
- key: String,
- path: String,
- headerFileName: Option[String] = None,
- contentType: Option[String] = None
-)
\ No newline at end of file
+ key: String,
+ path: String,
+ headerFileName: Option[String] = None,
+ contentType: Option[String] = None
+)
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/MultipartWritable.scala b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/MultipartWritable.scala
index 2b7e3a0b..c30f5262 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/MultipartWritable.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/MultipartWritable.scala
@@ -7,10 +7,10 @@ import play.api.libs.ws.{BodyWritable, SourceBody}
import java.nio.file.Paths
-/**
- * Adapted from `play.api.http.writeableOf_MultipartFormData` but more efficient due to the fact that,
- * rather then fully materializing, form data and files are concatenated as sources/streams before sending out.
- */
+/** Adapted from `play.api.http.writeableOf_MultipartFormData` but more
+ * efficient because, rather than fully materializing the body, form data
+ * and files are concatenated as sources/streams before sending out.
+ */
object MultipartWritable {
object HttpHeaderNames {
@@ -18,25 +18,25 @@ object MultipartWritable {
val CONTENT_TYPE = "content-type"
}
- /**
- * `Writeable` for `MultipartFormData`.
- */
- def writeableOf_MultipartFormData(
- charset: String)(
- implicit materializer: Materializer
+ /** `Writeable` for `MultipartFormData`.
+ */
+ def writeableOf_MultipartFormData(charset: String)(implicit
+ materializer: Materializer
): BodyWritable[MultipartFormData] = {
- val boundary: String = "--------" + scala.util.Random.alphanumeric.take(20).mkString("")
+ val boundary: String =
+ "--------" + scala.util.Random.alphanumeric.take(20).mkString("")
def encode(str: String) = ByteString.apply(str, charset)
def formatDataParts(data: Map[String, Seq[String]]) = {
- val dataParts = data.flatMap {
- case (name, values) =>
+ val dataParts = data
+ .flatMap { case (name, values) =>
values.map { value =>
s"--$boundary\r\n${HttpHeaderNames.CONTENT_DISPOSITION}: form-data; name=$name\r\n\r\n$value\r\n"
}
- }.mkString("")
+ }
+ .mkString("")
encode(dataParts)
}
@@ -44,34 +44,44 @@ object MultipartWritable {
def filePartHeader(file: FilePart) = {
val name = s""""${file.key}""""
val filename = s""""${file.headerFileName.getOrElse(file.path)}""""
- val contentType = file.contentType.map { ct =>
- s"${HttpHeaderNames.CONTENT_TYPE}: $ct\r\n"
- }.getOrElse("")
+ val contentType = file.contentType
+ .map { ct =>
+ s"${HttpHeaderNames.CONTENT_TYPE}: $ct\r\n"
+ }
+ .getOrElse("")
- encode(s"--$boundary\r\n${HttpHeaderNames.CONTENT_DISPOSITION}: form-data; name=$name; filename=$filename\r\n$contentType\r\n")
+ encode(
+ s"--$boundary\r\n${HttpHeaderNames.CONTENT_DISPOSITION}: form-data; name=$name; filename=$filename\r\n$contentType\r\n"
+ )
}
BodyWritable[MultipartFormData](
transform = { (form: MultipartFormData) =>
// combined data source
- val dataSource: Source[ByteString, _] = Source.single(formatDataParts(form.dataParts))
+ val dataSource: Source[ByteString, _] =
+ Source.single(formatDataParts(form.dataParts))
// files as sources
val fileSources: Seq[Source[ByteString, _]] = form.files.map { file =>
val fileSource = FileIO.fromPath(Paths.get(file.path))
- Source.single(filePartHeader(file)).concat(fileSource).concat(Source.single(encode("\r\n")))
+ Source
+ .single(filePartHeader(file))
+ .concat(fileSource)
+ .concat(Source.single(encode("\r\n")))
}
// file sources combined
- val combinedFileSource = fileSources.foldLeft(Source.empty[ByteString])(_.concat(_))
+ val combinedFileSource =
+ fileSources.foldLeft(Source.empty[ByteString])(_.concat(_))
// all sources concatenated into one
- val finalSource = dataSource.concat(combinedFileSource).concat(Source.single(encode(s"--$boundary--")))
+ val finalSource = dataSource
+ .concat(combinedFileSource)
+ .concat(Source.single(encode(s"--$boundary--")))
SourceBody(finalSource)
},
-
contentType = s"multipart/form-data; boundary=$boundary"
)
}
-}
\ No newline at end of file
+}
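
A usage sketch of the streaming writeable, assuming an implicit Materializer is in scope; the data part, file key, and path are hypothetical.

  import play.api.libs.ws.BodyWritable

  implicit val writeable: BodyWritable[MultipartFormData] =
    MultipartWritable.writeableOf_MultipartFormData("utf-8")

  val form = MultipartFormData(
    dataParts = Map("purpose" -> Seq("fine-tune")),
    files = Seq(FilePart(key = "file", path = "/tmp/training.jsonl"))
  )
  // posting `form` streams headers, data parts, and file bytes as one
  // concatenated Source[ByteString, _] without buffering the files in memory
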
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/Timeouts.scala b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/Timeouts.scala
index 12612c4d..fdd69e7c 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/Timeouts.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/Timeouts.scala
@@ -1,8 +1,8 @@
package io.cequence.openaiscala.service.ws
case class Timeouts(
- requestTimeout: Option[Int] = None,
- readTimeout: Option[Int] = None,
- connectTimeout: Option[Int] = None,
- pooledConnectionIdleTimeout: Option[Int] = None
+ requestTimeout: Option[Int] = None,
+ readTimeout: Option[Int] = None,
+ connectTimeout: Option[Int] = None,
+ pooledConnectionIdleTimeout: Option[Int] = None
)
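
All four fields are optional: anything left as None falls back to DefaultTimeouts when the HTTP client is built (see the WSHelper hunk below), and the values are handed straight to AsyncHttpClient's builder, which reads them as milliseconds. A small sketch with illustrative values:

  // allow slow completions while still failing fast on connect problems
  val timeouts = Timeouts(
    requestTimeout = Some(120000), // 2 minutes
    connectTimeout = Some(5000)    // readTimeout and pooled idle keep defaults
  )
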
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSHelper.scala b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSHelper.scala
index d86a282a..d489b99d 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSHelper.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSHelper.scala
@@ -22,10 +22,20 @@ trait WSHelper {
import play.shaded.ahc.org.asynchttpclient._
val asyncHttpClientConfig = new DefaultAsyncHttpClientConfig.Builder()
- .setConnectTimeout(timeouts.connectTimeout.getOrElse(DefaultTimeouts.connectTimeout))
- .setReadTimeout(timeouts.readTimeout.getOrElse(DefaultTimeouts.readTimeout))
- .setPooledConnectionIdleTimeout(timeouts.pooledConnectionIdleTimeout.getOrElse(DefaultTimeouts.pooledConnectionIdleTimeout))
- .setRequestTimeout(timeouts.requestTimeout.getOrElse(DefaultTimeouts.requestTimeout))
+ .setConnectTimeout(
+ timeouts.connectTimeout.getOrElse(DefaultTimeouts.connectTimeout)
+ )
+ .setReadTimeout(
+ timeouts.readTimeout.getOrElse(DefaultTimeouts.readTimeout)
+ )
+ .setPooledConnectionIdleTimeout(
+ timeouts.pooledConnectionIdleTimeout.getOrElse(
+ DefaultTimeouts.pooledConnectionIdleTimeout
+ )
+ )
+ .setRequestTimeout(
+ timeouts.requestTimeout.getOrElse(DefaultTimeouts.requestTimeout)
+ )
.build
val asyncHttpClient = new DefaultAsyncHttpClient(asyncHttpClientConfig)
val client = new StandaloneAhcWSClient(asyncHttpClient)
@@ -38,4 +48,4 @@ trait WSHelper {
def close(): Unit =
client.close()
-}
\ No newline at end of file
+}
diff --git a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSRequestHelper.scala b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSRequestHelper.scala
index b6240ceb..f41e608f 100644
--- a/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSRequestHelper.scala
+++ b/openai-client/src/main/scala/io/cequence/openaiscala/service/ws/WSRequestHelper.scala
@@ -3,8 +3,13 @@ package io.cequence.openaiscala.service.ws
import com.fasterxml.jackson.core.JsonParseException
import com.fasterxml.jackson.databind.JsonMappingException
import io.cequence.openaiscala.JsonUtil.toJson
-import io.cequence.openaiscala.{OpenAIScalaClientException, OpenAIScalaClientTimeoutException, OpenAIScalaClientUnknownHostException, OpenAIScalaTokenCountExceededException}
-import play.api.libs.json.{JsObject, JsValue, Json}
+import io.cequence.openaiscala.{
+ OpenAIScalaClientException,
+ OpenAIScalaClientTimeoutException,
+ OpenAIScalaClientUnknownHostException,
+ OpenAIScalaTokenCountExceededException
+}
+import play.api.libs.json.{JsObject, JsValue}
import play.api.libs.ws.{BodyWritable, StandaloneWSRequest}
import play.api.libs.ws.JsonBodyWritables._
import play.api.libs.ws.JsonBodyReadables._
@@ -14,11 +19,12 @@ import java.net.UnknownHostException
import java.util.concurrent.TimeoutException
import scala.concurrent.{ExecutionContext, Future}
-/**
- * Base class for web services with handy GET, POST, and DELETE request builders, and response handling
- *
- * @since Jan 2023
- */
+/** Base class for web services with handy GET, POST, and DELETE request
+ * builders, and response handling
+ *
+ * @since Jan 2023
+ */
trait WSRequestHelper extends WSHelper {
protected val coreUrl: String
@@ -41,19 +47,21 @@ trait WSRequestHelper extends WSHelper {
/////////
protected def execGET(
- endPoint: PEP,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil
): Future[JsValue] =
execGETWithStatus(
- endPoint, endPointParam, params
+ endPoint,
+ endPointParam,
+ params
).map(handleErrorResponse)
protected def execGETWithStatus(
- endPoint: PEP,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil,
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
): Future[RichJsResponse] = {
val request = getWSRequestOptional(Some(endPoint), endPointParam, params)
@@ -61,23 +69,25 @@ trait WSRequestHelper extends WSHelper {
}
protected def execGETJsonAux(
- request: StandaloneWSRequest,
- endPointForLogging: Option[PEP], // only for logging
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ request: StandaloneWSRequest,
+ endPointForLogging: Option[PEP], // only for logging
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
): Future[RichJsResponse] =
execRequestJsonAux(
- request, _.get(),
+ request,
+ _.get(),
acceptableStatusCodes,
endPointForLogging
)
protected def execGETStringAux(
- request: StandaloneWSRequest,
- endPointForLogging: Option[PEP], // only for logging
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ request: StandaloneWSRequest,
+ endPointForLogging: Option[PEP], // only for logging
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
): Future[RichStringResponse] =
execRequestStringAux(
- request, _.get(),
+ request,
+ _.get(),
acceptableStatusCodes,
endPointForLogging
)
@@ -86,115 +96,132 @@ trait WSRequestHelper extends WSHelper {
// POST //
//////////
- /**
- * @param fileParams - the third param in a tuple is a display (header) file name
- */
+ /** @param fileParams
+ *   the third element of each tuple is a display (header) file name
+ */
protected def execPOSTMultipart(
- endPoint: PEP,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil,
- fileParams: Seq[(PT, File, Option[String])] = Nil,
- bodyParams: Seq[(PT, Option[Any])] = Nil
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ fileParams: Seq[(PT, File, Option[String])] = Nil,
+ bodyParams: Seq[(PT, Option[Any])] = Nil
): Future[JsValue] =
execPOSTMultipartWithStatus(
- endPoint, endPointParam, params, fileParams, bodyParams
+ endPoint,
+ endPointParam,
+ params,
+ fileParams,
+ bodyParams
).map(handleErrorResponse)
- /**
- * @param fileParams - the third param in a tuple is a display (header) file name
- */
+ /** @param fileParams
+ *   the third element of each tuple is a display (header) file name
+ */
protected def execPOSTMultipartWithStatus(
- endPoint: PEP,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil,
- fileParams: Seq[(PT, File, Option[String])] = Nil,
- bodyParams: Seq[(PT, Option[Any])] = Nil,
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ fileParams: Seq[(PT, File, Option[String])] = Nil,
+ bodyParams: Seq[(PT, Option[Any])] = Nil,
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
): Future[RichJsResponse] = {
val request = getWSRequestOptional(Some(endPoint), endPointParam, params)
val formData = createMultipartFormData(fileParams, bodyParams)
- implicit val writeable: BodyWritable[MultipartFormData] = writeableOf_MultipartFormData("utf-8")
+ implicit val writeable: BodyWritable[MultipartFormData] =
+ writeableOf_MultipartFormData("utf-8")
execPOSTJsonAux(request, formData, Some(endPoint), acceptableStatusCodes)
}
- /**
- * @param fileParams - the third param in a tuple is a display (header) file name
- */
+ /** @param fileParams
+ *   the third element of each tuple is a display (header) file name
+ */
protected def execPOSTMultipartWithStatusString(
- endPoint: PEP,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil,
- fileParams: Seq[(PT, File, Option[String])] = Nil,
- bodyParams: Seq[(PT, Option[Any])] = Nil,
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ fileParams: Seq[(PT, File, Option[String])] = Nil,
+ bodyParams: Seq[(PT, Option[Any])] = Nil,
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
): Future[RichStringResponse] = {
val request = getWSRequestOptional(Some(endPoint), endPointParam, params)
val formData = createMultipartFormData(fileParams, bodyParams)
- implicit val writeable: BodyWritable[MultipartFormData] = writeableOf_MultipartFormData("utf-8")
+ implicit val writeable: BodyWritable[MultipartFormData] =
+ writeableOf_MultipartFormData("utf-8")
execPOSTStringAux(request, formData, Some(endPoint), acceptableStatusCodes)
}
// create a multipart form data holder containing classic (key-value) data parts as well as file parts
private def createMultipartFormData(
- fileParams: Seq[(PT, File, Option[String])] = Nil,
- bodyParams: Seq[(PT, Option[Any])] = Nil
+ fileParams: Seq[(PT, File, Option[String])] = Nil,
+ bodyParams: Seq[(PT, Option[Any])] = Nil
) = MultipartFormData(
dataParts = bodyParams.collect { case (key, Some(value)) =>
(key.toString, Seq(value.toString))
}.toMap,
-
files = fileParams.map { case (key, file, headerFileName) =>
FilePart(key.toString, file.getPath, headerFileName)
}
)
protected def execPOST(
- endPoint: PEP,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil,
- bodyParams: Seq[(PT, Option[JsValue])] = Nil
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ bodyParams: Seq[(PT, Option[JsValue])] = Nil
): Future[JsValue] =
execPOSTWithStatus(
- endPoint, endPointParam, params, bodyParams
+ endPoint,
+ endPointParam,
+ params,
+ bodyParams
).map(handleErrorResponse)
protected def execPOSTWithStatus(
- endPoint: PEP,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil,
- bodyParams: Seq[(PT, Option[JsValue])] = Nil,
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ bodyParams: Seq[(PT, Option[JsValue])] = Nil,
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
): Future[RichJsResponse] = {
val request = getWSRequestOptional(Some(endPoint), endPointParam, params)
- val bodyParamsX = bodyParams.collect { case (fieldName, Some(jsValue)) => (fieldName.toString, jsValue) }
+ val bodyParamsX = bodyParams.collect { case (fieldName, Some(jsValue)) =>
+ (fieldName.toString, jsValue)
+ }
- execPOSTJsonAux(request, JsObject(bodyParamsX), Some(endPoint), acceptableStatusCodes)
+ execPOSTJsonAux(
+ request,
+ JsObject(bodyParamsX),
+ Some(endPoint),
+ acceptableStatusCodes
+ )
}
protected def execPOSTJsonAux[T: BodyWritable](
- request: StandaloneWSRequest,
- body: T,
- endPointForLogging: Option[PEP], // only for logging
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
- ) =
+ request: StandaloneWSRequest,
+ body: T,
+ endPointForLogging: Option[PEP], // only for logging
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ ): Future[RichJsResponse] =
execRequestJsonAux(
- request, _.post(body),
+ request,
+ _.post(body),
acceptableStatusCodes,
endPointForLogging
)
protected def execPOSTStringAux[T: BodyWritable](
- request: StandaloneWSRequest,
- body: T,
- endPointForLogging: Option[PEP], // only for logging
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
- ) =
+ request: StandaloneWSRequest,
+ body: T,
+ endPointForLogging: Option[PEP], // only for logging
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ ): Future[RichStringResponse] =
execRequestStringAux(
- request, _.post(body),
+ request,
+ _.post(body),
acceptableStatusCodes,
endPointForLogging
)
@@ -204,19 +231,21 @@ trait WSRequestHelper extends WSHelper {
////////////
protected def execDELETE(
- endPoint: PEP,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil
): Future[JsValue] =
execDELETEWithStatus(
- endPoint, endPointParam, params
+ endPoint,
+ endPointParam,
+ params
).map(handleErrorResponse)
protected def execDELETEWithStatus(
- endPoint: PEP,
- endPointParam: Option[String] = None,
- params: Seq[(PT, Option[Any])] = Nil,
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ endPoint: PEP,
+ endPointParam: Option[String] = None,
+ params: Seq[(PT, Option[Any])] = Nil,
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
): Future[RichJsResponse] = {
val request = getWSRequestOptional(Some(endPoint), endPointParam, params)
@@ -224,12 +253,13 @@ trait WSRequestHelper extends WSHelper {
}
private def execDeleteAux(
- request: StandaloneWSRequest,
- endPointForLogging: Option[PEP], // only for logging
- acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
+ request: StandaloneWSRequest,
+ endPointForLogging: Option[PEP], // only for logging
+ acceptableStatusCodes: Seq[Int] = defaultAcceptableStatusCodes
): Future[RichJsResponse] =
execRequestJsonAux(
- request, _.delete(),
+ request,
+ _.delete(),
acceptableStatusCodes,
endPointForLogging
)
@@ -239,9 +269,9 @@ trait WSRequestHelper extends WSHelper {
////////////////
protected def getWSRequest(
- endPoint: Option[PEP],
- endPointParam: Option[String],
- params: Seq[(PT, Any)]
+ endPoint: Option[PEP],
+ endPointParam: Option[String],
+ params: Seq[(PT, Any)]
): StandaloneWSRequest = {
val paramsString = paramsAsString(params)
val url = createUrl(endPoint, endPointParam) + paramsString
@@ -250,9 +280,9 @@ trait WSRequestHelper extends WSHelper {
}
protected def getWSRequestOptional(
- endPoint: Option[PEP],
- endPointParam: Option[String],
- params: Seq[(PT, Option[Any])]
+ endPoint: Option[PEP],
+ endPointParam: Option[String],
+ params: Seq[(PT, Option[Any])]
): StandaloneWSRequest = {
val paramsString = paramsOptionalAsString(params)
val url = createUrl(endPoint, endPointParam) + paramsString
@@ -261,10 +291,10 @@ trait WSRequestHelper extends WSHelper {
}
private def execRequestJsonAux(
- request: StandaloneWSRequest,
- exec: StandaloneWSRequest => Future[StandaloneWSRequest#Response],
- acceptableStatusCodes: Seq[Int] = Nil,
- endPointForLogging: Option[PEP] = None // only for logging
+ request: StandaloneWSRequest,
+ exec: StandaloneWSRequest => Future[StandaloneWSRequest#Response],
+ acceptableStatusCodes: Seq[Int] = Nil,
+ endPointForLogging: Option[PEP] = None // only for logging
): Future[RichJsResponse] =
execRequestRaw(
request,
@@ -276,17 +306,23 @@ trait WSRequestHelper extends WSHelper {
try {
Left(response.body[JsValue])
} catch {
- case _: JsonParseException => throw new OpenAIScalaClientException(s"$serviceName.${endPointForLogging.map(_.toString).getOrElse("")}: '${response.body}' is not a JSON.")
- case _: JsonMappingException => throw new OpenAIScalaClientException(s"$serviceName.${endPointForLogging.map(_.toString).getOrElse("")}: '${response.body}' is an unmappable JSON.")
+ case _: JsonParseException =>
+ throw new OpenAIScalaClientException(
+ s"$serviceName.${endPointForLogging.map(_.toString).getOrElse("")}: '${response.body}' is not a JSON."
+ )
+ case _: JsonMappingException =>
+ throw new OpenAIScalaClientException(
+ s"$serviceName.${endPointForLogging.map(_.toString).getOrElse("")}: '${response.body}' is an unmappable JSON."
+ )
}
case Right(response) => Right(response)
})
private def execRequestStringAux(
- request: StandaloneWSRequest,
- exec: StandaloneWSRequest => Future[StandaloneWSRequest#Response],
- acceptableStatusCodes: Seq[Int] = Nil,
- endPointForLogging: Option[PEP] = None // only for logging
+ request: StandaloneWSRequest,
+ exec: StandaloneWSRequest => Future[StandaloneWSRequest#Response],
+ acceptableStatusCodes: Seq[Int] = Nil,
+ endPointForLogging: Option[PEP] = None // only for logging
): Future[RichStringResponse] =
execRequestRaw(
request,
@@ -294,15 +330,15 @@ trait WSRequestHelper extends WSHelper {
acceptableStatusCodes,
endPointForLogging
).map(_ match {
- case Left(response) => Left(response.body)
+ case Left(response) => Left(response.body)
case Right(response) => Right(response)
})
private def execRequestRaw(
- request: StandaloneWSRequest,
- exec: StandaloneWSRequest => Future[StandaloneWSRequest#Response],
- acceptableStatusCodes: Seq[Int] = Nil,
- endPointForLogging: Option[PEP] = None // only for logging
+ request: StandaloneWSRequest,
+ exec: StandaloneWSRequest => Future[StandaloneWSRequest#Response],
+ acceptableStatusCodes: Seq[Int] = Nil,
+ endPointForLogging: Option[PEP] = None // only for logging
): Future[Either[StandaloneWSRequest#Response, (Int, String)]] = {
exec(request).map { response =>
if (!acceptableStatusCodes.contains(response.status))
@@ -311,60 +347,79 @@ trait WSRequestHelper extends WSHelper {
Left(response)
}
}.recover {
- case e: TimeoutException => throw new OpenAIScalaClientTimeoutException(s"$serviceName.${endPointForLogging.map(_.toString).getOrElse("")} timed out: ${e.getMessage}.")
- case e: UnknownHostException => throw new OpenAIScalaClientUnknownHostException(s"$serviceName.${endPointForLogging.map(_.toString).getOrElse("")} cannot resolve a host name: ${e.getMessage}.")
+ case e: TimeoutException =>
+ throw new OpenAIScalaClientTimeoutException(
+ s"$serviceName.${endPointForLogging.map(_.toString).getOrElse("")} timed out: ${e.getMessage}."
+ )
+ case e: UnknownHostException =>
+ throw new OpenAIScalaClientUnknownHostException(
+ s"$serviceName.${endPointForLogging.map(_.toString).getOrElse("")} cannot resolve a host name: ${e.getMessage}."
+ )
}
// aux
protected def jsonBodyParams(
- params: (PT, Option[Any])*
- ) =
+ params: (PT, Option[Any])*
+ ): Seq[(PT, Option[JsValue])] =
params.map { case (paramName, value) => (paramName, value.map(toJson)) }
- protected def handleErrorResponse[T](response: RichResponse[T]) =
+ protected def handleErrorResponse[T](response: RichResponse[T]): T =
response match {
case Left(data) => data
case Right((errorCode, message)) =>
val errorMessage = s"Code ${errorCode} : ${message}"
- if (message.contains("Please reduce your prompt; or completion length") ||
- message.contains("Please reduce the length of the messages")
- )
+ if (
+ message.contains("Please reduce your prompt; or completion length") ||
+ message.contains("Please reduce the length of the messages")
+ )
throw new OpenAIScalaTokenCountExceededException(errorMessage)
else
throw new OpenAIScalaClientException(errorMessage)
}
- protected def handleNotFoundAndError[T](response: Either[T, (Int, String)]): Option[T] =
+ protected def handleNotFoundAndError[T](
+ response: Either[T, (Int, String)]
+ ): Option[T] =
response match {
case Left(value) => Some(value)
case Right((errorCode, message)) =>
- if (errorCode == 404) None else throw new OpenAIScalaClientException(s"Code ${errorCode} : ${message}")
+ if (errorCode == 404) None
+ else
+ throw new OpenAIScalaClientException(
+ s"Code ${errorCode} : ${message}"
+ )
}
- protected def paramsAsString(params: Seq[(PT, Any)]) = {
- val string = params.map { case (tag, value) => s"$tag=$value" }.mkString("&")
+ protected def paramsAsString(params: Seq[(PT, Any)]): String = {
+ val string =
+ params.map { case (tag, value) => s"$tag=$value" }.mkString("&")
if (string.nonEmpty) s"?$string" else ""
}
- protected def paramsOptionalAsString(params: Seq[(PT, Option[Any])]) = {
- val string = params.collect { case (tag, Some(value)) => s"$tag=$value" }.mkString("&")
+ protected def paramsOptionalAsString(
+ params: Seq[(PT, Option[Any])]
+ ): String = {
+ val string =
+ params.collect { case (tag, Some(value)) => s"$tag=$value" }.mkString("&")
if (string.nonEmpty) s"?$string" else ""
}
protected def createUrl(
- endpoint: Option[PEP],
- value: Option[String] = None
- ) =
- coreUrl + endpoint.map(_.toString).getOrElse("") + value.map("/" + _).getOrElse("")
+ endpoint: Option[PEP],
+ value: Option[String] = None
+ ): String =
+ coreUrl + endpoint.map(_.toString).getOrElse("") + value
+ .map("/" + _)
+ .getOrElse("")
protected def toOptionalParams(
- params: Seq[(PT, Any)]
- ) =
+ params: Seq[(PT, Any)]
+ ): Seq[(PT, Some[Any])] =
params.map { case (a, b) => (a, Some(b)) }
// close
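
A worked example of the URL helpers above, with a hypothetical coreUrl and endpoint:

  // with coreUrl == "https://api.openai.com/v1/" and an endpoint "models":
  // createUrl(Some(endpoint), Some("gpt-4"))
  //   == "https://api.openai.com/v1/models/gpt-4"
  // paramsOptionalAsString(Seq("limit" -> Some(10), "user" -> None))
  //   == "?limit=10" (None entries are dropped)
  // paramsAsString(Seq("limit" -> 10, "order" -> "asc"))
  //   == "?limit=10&order=asc"
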
diff --git a/openai-core/build.sbt b/openai-core/build.sbt
index e176593e..3ee7a9c9 100644
--- a/openai-core/build.sbt
+++ b/openai-core/build.sbt
@@ -2,4 +2,4 @@ import sbt.Keys.test
name := "openai-scala-core"
-description := "Core module of OpenAI Scala client"
\ No newline at end of file
+description := "Core module of OpenAI Scala client"
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/OpenAIScalaClientException.scala b/openai-core/src/main/scala/io/cequence/openaiscala/OpenAIScalaClientException.scala
index dd6347eb..040fb8c2 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/OpenAIScalaClientException.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/OpenAIScalaClientException.scala
@@ -1,17 +1,21 @@
package io.cequence.openaiscala
-class OpenAIScalaClientException(message: String, cause: Throwable) extends RuntimeException(message, cause) {
+class OpenAIScalaClientException(message: String, cause: Throwable)
+ extends RuntimeException(message, cause) {
def this(message: String) = this(message, null)
}
-class OpenAIScalaClientTimeoutException(message: String, cause: Throwable) extends OpenAIScalaClientException(message, cause) {
+class OpenAIScalaClientTimeoutException(message: String, cause: Throwable)
+ extends OpenAIScalaClientException(message, cause) {
def this(message: String) = this(message, null)
}
-class OpenAIScalaClientUnknownHostException(message: String, cause: Throwable) extends OpenAIScalaClientException(message, cause) {
+class OpenAIScalaClientUnknownHostException(message: String, cause: Throwable)
+ extends OpenAIScalaClientException(message, cause) {
def this(message: String) = this(message, null)
}
-class OpenAIScalaTokenCountExceededException(message: String, cause: Throwable) extends OpenAIScalaClientException(message, cause) {
+class OpenAIScalaTokenCountExceededException(message: String, cause: Throwable)
+ extends OpenAIScalaClientException(message, cause) {
def this(message: String) = this(message, null)
-}
\ No newline at end of file
+}
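
Since every specific failure extends OpenAIScalaClientException, callers can recover at whatever granularity they need; a sketch, where `call` stands for a hypothetical service invocation:

  import scala.concurrent.Future
  import scala.concurrent.ExecutionContext.Implicits.global

  def withOneRetry[T](call: => Future[T]): Future[T] =
    call.recoverWith {
      // retry once on timeouts only; unknown-host and token-count errors
      // still propagate as OpenAIScalaClientException subtypes
      case _: OpenAIScalaClientTimeoutException => call
    }
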
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/EnumValue.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/EnumValue.scala
index fee9ed97..a54b170f 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/EnumValue.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/EnumValue.scala
@@ -1,5 +1,6 @@
package io.cequence.openaiscala.domain
abstract class EnumValue(value: String = "") {
- override def toString = if (value.nonEmpty) value else getClass.getSimpleName.stripSuffix("$")
+ override def toString: String =
+ if (value.nonEmpty) value else getClass.getSimpleName.stripSuffix("$")
}
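
The fallback strips the trailing `$` the compiler appends to case object class names, so a sketch with hypothetical objects behaves like this:

  case object Pending extends EnumValue()       // toString == "Pending"
  case object Done    extends EnumValue("done") // toString == "done"
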
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/FunctionSpec.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/FunctionSpec.scala
index 8eb20003..1a70ad1d 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/FunctionSpec.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/FunctionSpec.scala
@@ -1,14 +1,14 @@
package io.cequence.openaiscala.domain
case class FunctionSpec(
- // The name of the function to be called.
- // Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
- name: String,
+ // The name of the function to be called.
+ // Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
+ name: String,
- // The description of what the function does.
- description: Option[String] = None,
+ // The description of what the function does.
+ description: Option[String] = None,
- // The parameters the functions accepts, described as a JSON Schema object.
- // See the guide for examples, and the JSON Schema reference for documentation about the format.
- parameters: Map[String, Any]
-)
\ No newline at end of file
+ // The parameters the function accepts, described as a JSON Schema object.
+ // See the guide for examples, and the JSON Schema reference for documentation about the format.
+ parameters: Map[String, Any]
+)
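
A construction sketch with JSON-Schema-shaped parameters, matching the field docs above; the function itself is hypothetical:

  val getWeather = FunctionSpec(
    name = "get_current_weather",
    description = Some("Gets the current weather for a location"),
    parameters = Map(
      "type" -> "object",
      "properties" -> Map(
        "location" -> Map("type" -> "string")
      ),
      "required" -> Seq("location")
    )
  )
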
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/MessageSpec.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/MessageSpec.scala
index 1bf6628d..7a3137c1 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/MessageSpec.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/MessageSpec.scala
@@ -7,39 +7,39 @@ sealed trait BaseMessageSpec {
}
final case class MessageSpec(
- // The role of the messages author. One of system, user, or assistant.
- role: ChatRole,
+ // The role of the message's author. One of system, user, or assistant.
+ role: ChatRole,
- // The contents of the message.
- content: String,
+ // The contents of the message.
+ content: String,
- // The name of the author of this message. name is required if role is function, and
- // it should be the name of the function whose response is in the content.
- // May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
- name: Option[String] = None,
+ // The name of the author of this message. name is required if role is function, and
+ // it should be the name of the function whose response is in the content.
+ // May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
+ name: Option[String] = None
) extends BaseMessageSpec {
- override val contentOptional = Some(content)
+ override val contentOptional: Some[String] = Some(content)
}
final case class FunMessageSpec(
- // The role of the messages author. One of system, user, assistant, or function.
- role: ChatRole,
+ // The role of the message's author. One of system, user, assistant, or function.
+ role: ChatRole,
- // The contents of the message. Content is required for all messages except assistant messages with function calls.
- content: Option[String],
+ // The contents of the message. Content is required for all messages except assistant messages with function calls.
+ content: Option[String],
- // The name of the author of this message. name is required if role is function, and
- // it should be the name of the function whose response is in the content.
- // May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
- name: Option[String] = None,
+ // The name of the author of this message. name is required if role is function, and
+ // it should be the name of the function whose response is in the content.
+ // May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
+ name: Option[String] = None,
- // The name and arguments of a function that should be called, as generated by the model.
- function_call: Option[FunctionCallSpec] = None
+ // The name and arguments of a function that should be called, as generated by the model.
+ function_call: Option[FunctionCallSpec] = None
) extends BaseMessageSpec {
override val contentOptional = content
}
case class FunctionCallSpec(
- name: String,
- arguments: String
-)
\ No newline at end of file
+ name: String,
+ arguments: String
+)
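
A construction sketch, assuming the ChatRole case objects (System, User, Assistant, Function) defined elsewhere in this domain package:

  val messages = Seq(
    MessageSpec(ChatRole.System, "You are a helpful assistant."),
    MessageSpec(ChatRole.User, "Hello!")
  )

  // function results travel as FunMessageSpec; content is optional because
  // assistant messages that carry a function_call may have no content
  val funResult = FunMessageSpec(
    role = ChatRole.Function,
    content = Some("""{"temp": 21}"""),
    name = Some("get_current_weather")
  )
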
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/ModelId.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/ModelId.scala
index 2ecf0c97..5239c69e 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/ModelId.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/ModelId.scala
@@ -1,10 +1,10 @@
package io.cequence.openaiscala.domain
-/**
- * OpenAI models available as of `2023-03-07`.
- *
- * @since Jan 2023
- */
+/** OpenAI models available as of `2023-03-07`.
+ *
+ * @since Jan 2023
+ */
object ModelId {
// Ada
@@ -92,23 +92,31 @@ object ModelId {
val whisper_1_2 = "whisper-1.2"
// GPT-3.5 (ChatGPT)
- val gpt_3_5_turbo = "gpt-3.5-turbo" // 4k context, uses the version 0301 till June 27th, then 0613
+ val gpt_3_5_turbo =
+ "gpt-3.5-turbo" // 4k context, uses the version 0301 till June 27th, then 0613
@Deprecated // supported till 09/13/2023
- val gpt_3_5_turbo_0301 = "gpt-3.5-turbo-0301" // 4k context (March 1st snapshot)
- val gpt_3_5_turbo_0613 = "gpt-3.5-turbo-0613" // 4k context (June 13th snapshot), fine-tuned for function calling
+ val gpt_3_5_turbo_0301 =
+ "gpt-3.5-turbo-0301" // 4k context (March 1st snapshot)
+ val gpt_3_5_turbo_0613 =
+ "gpt-3.5-turbo-0613" // 4k context (June 13th snapshot), fine-tuned for function calling
val gpt_3_5_turbo_16k = "gpt-3.5-turbo-16k" // 16k context
- val gpt_3_5_turbo_16k_0613 = "gpt-3.5-turbo-16k-0613" // 16k context (June 13th snapshot), fine-tuned for function calling
+ val gpt_3_5_turbo_16k_0613 =
+ "gpt-3.5-turbo-16k-0613" // 16k context (June 13th snapshot), fine-tuned for function calling
// GPT-4
- val gpt_4 = "gpt-4" // 8k context, uses the version 0301 till June 27th, then 0613
+ val gpt_4 =
+ "gpt-4" // 8k context, uses the version 0301 till June 27th, then 0613
@Deprecated // supported till 09/13/2023
val gpt_4_0314 = "gpt-4-0314" // 8k context (March 14th snapshot)
- val gpt_4_0613 = "gpt-4-0613" // 8k context (June 13th snapshot), fine-tuned for function calling
+ val gpt_4_0613 =
+ "gpt-4-0613" // 8k context (June 13th snapshot), fine-tuned for function calling
- val gpt_4_32k = "gpt-4-32k" // 32k context, uses the version 0314 till June 27th, then 0613
+ val gpt_4_32k =
+ "gpt-4-32k" // 32k context, uses the version 0314 till June 27th, then 0613
@Deprecated // supported till 09/13/2023
val gpt_4_32k_0314 = "gpt-4-32k-0314" // 32k context (March 14th snapshot)
- val gpt_4_32k_0613 = "gpt-4-32k-0613" // 32k context (June 13th snapshot), fine-tuned for function calling
+ val gpt_4_32k_0613 =
+ "gpt-4-32k-0613" // 32k context (June 13th snapshot), fine-tuned for function calling
// Other
@Deprecated
@@ -116,4 +124,4 @@ object ModelId {
@Deprecated
val code_cushman_002 = "code-cushman-002"
val cushman_2020_05_03 = "cushman:2020-05-03"
-}
\ No newline at end of file
+}
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ChatCompletionResponse.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ChatCompletionResponse.scala
index 7f62939b..e974b2aa 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ChatCompletionResponse.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ChatCompletionResponse.scala
@@ -1,10 +1,18 @@
package io.cequence.openaiscala.domain.response
-import io.cequence.openaiscala.domain.{BaseMessageSpec, ChatRole, FunMessageSpec, MessageSpec}
+import io.cequence.openaiscala.domain.{
+ BaseMessageSpec,
+ ChatRole,
+ FunMessageSpec,
+ MessageSpec
+}
import java.{util => ju}
-sealed trait BaseChatCompletionResponse[M <: BaseMessageSpec, C <: BaseChatCompletionChoiceInfo[M]] {
+sealed trait BaseChatCompletionResponse[
+ M <: BaseMessageSpec,
+ C <: BaseChatCompletionChoiceInfo[M]
+] {
val id: String
val created: ju.Date
val model: String
@@ -13,20 +21,23 @@ sealed trait BaseChatCompletionResponse[M <: BaseMessageSpec, C <: BaseChatCompl
}
case class ChatCompletionResponse(
- id: String,
- created: ju.Date,
- model: String,
- choices: Seq[ChatCompletionChoiceInfo],
- usage: Option[UsageInfo]
+ id: String,
+ created: ju.Date,
+ model: String,
+ choices: Seq[ChatCompletionChoiceInfo],
+ usage: Option[UsageInfo]
) extends BaseChatCompletionResponse[MessageSpec, ChatCompletionChoiceInfo]
case class ChatFunCompletionResponse(
- id: String,
- created: ju.Date,
- model: String,
- choices: Seq[ChatFunCompletionChoiceInfo],
- usage: Option[UsageInfo]
-) extends BaseChatCompletionResponse[FunMessageSpec, ChatFunCompletionChoiceInfo]
+ id: String,
+ created: ju.Date,
+ model: String,
+ choices: Seq[ChatFunCompletionChoiceInfo],
+ usage: Option[UsageInfo]
+) extends BaseChatCompletionResponse[
+ FunMessageSpec,
+ ChatFunCompletionChoiceInfo
+ ]
sealed trait BaseChatCompletionChoiceInfo[M <: BaseMessageSpec] {
val message: M
@@ -35,34 +46,34 @@ sealed trait BaseChatCompletionChoiceInfo[M <: BaseMessageSpec] {
}
case class ChatCompletionChoiceInfo(
- message: MessageSpec,
- index: Int,
- finish_reason: Option[String]
+ message: MessageSpec,
+ index: Int,
+ finish_reason: Option[String]
) extends BaseChatCompletionChoiceInfo[MessageSpec]
case class ChatFunCompletionChoiceInfo(
- message: FunMessageSpec,
- index: Int,
- finish_reason: Option[String]
+ message: FunMessageSpec,
+ index: Int,
+ finish_reason: Option[String]
) extends BaseChatCompletionChoiceInfo[FunMessageSpec]
// chunk - streamed
case class ChatCompletionChunkResponse(
- id: String,
- created: ju.Date,
- model: String,
- choices: Seq[ChatCompletionChoiceChunkInfo],
- usage: Option[UsageInfo]
+ id: String,
+ created: ju.Date,
+ model: String,
+ choices: Seq[ChatCompletionChoiceChunkInfo],
+ usage: Option[UsageInfo]
)
case class ChatCompletionChoiceChunkInfo(
- delta: ChunkMessageSpec,
- index: Int,
- finish_reason: Option[String]
+ delta: ChunkMessageSpec,
+ index: Int,
+ finish_reason: Option[String]
)
// we should incorporate this into the MessageSpec hierarchy (but the role is optional)
case class ChunkMessageSpec(
- role: Option[ChatRole],
- content: Option[String]
-)
\ No newline at end of file
+ role: Option[ChatRole],
+ content: Option[String]
+)
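
Two small helper sketches over the case classes above, pulling the first choice's text out of each response shape:

  def firstContent(response: ChatCompletionResponse): Option[String] =
    response.choices.headOption.map(_.message.content)

  // in the function-calling variant the content itself is optional
  def firstFunContent(response: ChatFunCompletionResponse): Option[String] =
    response.choices.headOption.flatMap(_.message.content)
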
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/DeleteResponse.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/DeleteResponse.scala
index 41ea7d74..87878091 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/DeleteResponse.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/DeleteResponse.scala
@@ -8,4 +8,4 @@ object DeleteResponse {
case object Deleted extends DeleteResponse
case object NotDeleted extends DeleteResponse
case object NotFound extends DeleteResponse
-}
\ No newline at end of file
+}
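
A matching sketch over the three cases; as wired up in OpenAIServiceImpl earlier in this diff, NotFound corresponds to an HTTP 404 rather than an exception:

  def describe(result: DeleteResponse): String = result match {
    case DeleteResponse.Deleted    => "resource deleted"
    case DeleteResponse.NotDeleted => "delete was rejected"
    case DeleteResponse.NotFound   => "no such resource (HTTP 404)"
  }
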
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/EmbeddingResponse.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/EmbeddingResponse.scala
index ed963752..3b65f58a 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/EmbeddingResponse.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/EmbeddingResponse.scala
@@ -1,17 +1,17 @@
package io.cequence.openaiscala.domain.response
case class EmbeddingResponse(
- data: Seq[EmbeddingInfo],
- model: String,
- usage: EmbeddingUsageInfo
+ data: Seq[EmbeddingInfo],
+ model: String,
+ usage: EmbeddingUsageInfo
)
case class EmbeddingInfo(
- embedding: Seq[Double],
- index: Int
+ embedding: Seq[Double],
+ index: Int
)
case class EmbeddingUsageInfo(
- prompt_tokens: Int,
- total_tokens: Int
-)
\ No newline at end of file
+ prompt_tokens: Int,
+ total_tokens: Int
+)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/FileInfo.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/FileInfo.scala
index 3989ed2e..415e4e4e 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/FileInfo.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/FileInfo.scala
@@ -3,11 +3,11 @@ package io.cequence.openaiscala.domain.response
import java.{util => ju}
case class FileInfo(
- id: String,
- bytes: Long,
- created_at: ju.Date,
- filename: String,
- purpose: String,
- status: String,
- status_details: Option[String]
-)
\ No newline at end of file
+ id: String,
+ bytes: Long,
+ created_at: ju.Date,
+ filename: String,
+ purpose: String,
+ status: String,
+ status_details: Option[String]
+)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/FineTuneJob.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/FineTuneJob.scala
index 1a565abe..68884d73 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/FineTuneJob.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/FineTuneJob.scala
@@ -3,29 +3,29 @@ package io.cequence.openaiscala.domain.response
import java.{util => ju}
case class FineTuneJob(
- id: String,
- model: String,
- created_at: ju.Date,
- events: Option[Seq[FineTuneEvent]],
- fine_tuned_model: Option[String],
- hyperparams: FineTuneHyperparams,
- organization_id: String,
- result_files: Seq[FileInfo],
- status: String, // e.g. pending or cancelled
- validation_files: Seq[FileInfo],
- training_files: Seq[FileInfo],
- updated_at: ju.Date,
+ id: String,
+ model: String,
+ created_at: ju.Date,
+ events: Option[Seq[FineTuneEvent]],
+ fine_tuned_model: Option[String],
+ hyperparams: FineTuneHyperparams,
+ organization_id: String,
+ result_files: Seq[FileInfo],
+ status: String, // e.g. pending or cancelled
+ validation_files: Seq[FileInfo],
+ training_files: Seq[FileInfo],
+ updated_at: ju.Date
)
case class FineTuneEvent(
- created_at: ju.Date,
- level: String,
- message: String
+ created_at: ju.Date,
+ level: String,
+ message: String
)
case class FineTuneHyperparams(
- batch_size: Option[Int],
- learning_rate_multiplier: Option[Double],
- n_epochs: Int,
- prompt_loss_weight: Double
-)
\ No newline at end of file
+ batch_size: Option[Int],
+ learning_rate_multiplier: Option[Double],
+ n_epochs: Int,
+ prompt_loss_weight: Double
+)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ImageInfo.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ImageInfo.scala
index 8a3c0ce1..db2f9142 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ImageInfo.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ImageInfo.scala
@@ -3,6 +3,6 @@ package io.cequence.openaiscala.domain.response
import java.{util => ju}
case class ImageInfo(
- created: ju.Date,
- data: Seq[Map[String, String]]
+ created: ju.Date,
+ data: Seq[Map[String, String]]
)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ModelInfo.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ModelInfo.scala
index efd13bbf..7bb659be 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ModelInfo.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ModelInfo.scala
@@ -3,24 +3,24 @@ package io.cequence.openaiscala.domain.response
import java.{util => ju}
case class ModelInfo(
- id: String,
- created: ju.Date,
- owned_by: String,
- root: String,
- parent: Option[String],
- permission: Seq[Permission]
+ id: String,
+ created: ju.Date,
+ owned_by: String,
+ root: String,
+ parent: Option[String],
+ permission: Seq[Permission]
)
case class Permission(
- id: String,
- created: ju.Date,
- allow_create_engine: Boolean,
- allow_sampling: Boolean,
- allow_logprobs: Boolean,
- allow_search_indices: Boolean,
- allow_view: Boolean,
- allow_fine_tuning: Boolean,
- organization: String,
- group: Option[String],
- is_blocking: Boolean
-)
\ No newline at end of file
+ id: String,
+ created: ju.Date,
+ allow_create_engine: Boolean,
+ allow_sampling: Boolean,
+ allow_logprobs: Boolean,
+ allow_search_indices: Boolean,
+ allow_view: Boolean,
+ allow_fine_tuning: Boolean,
+ organization: String,
+ group: Option[String],
+ is_blocking: Boolean
+)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ModerationResponse.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ModerationResponse.scala
index df8f505b..0c5da1e0 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ModerationResponse.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ModerationResponse.scala
@@ -1,33 +1,33 @@
package io.cequence.openaiscala.domain.response
case class ModerationResponse(
- id: String,
- model: String,
- results: Seq[ModerationResult]
+ id: String,
+ model: String,
+ results: Seq[ModerationResult]
)
case class ModerationResult(
- categories: ModerationCategories,
- category_scores: ModerationCategoryScores,
- flagged: Boolean
+ categories: ModerationCategories,
+ category_scores: ModerationCategoryScores,
+ flagged: Boolean
)
case class ModerationCategories(
- hate: Boolean,
- hate_threatening: Boolean,
- self_harm: Boolean,
- sexual: Boolean,
- sexual_minors: Boolean,
- violence: Boolean,
- violence_graphic: Boolean
+ hate: Boolean,
+ hate_threatening: Boolean,
+ self_harm: Boolean,
+ sexual: Boolean,
+ sexual_minors: Boolean,
+ violence: Boolean,
+ violence_graphic: Boolean
)
case class ModerationCategoryScores(
- hate: Double,
- hate_threatening: Double,
- self_harm: Double,
- sexual: Double,
- sexual_minors: Double,
- violence: Double,
- violence_graphic: Double
-)
\ No newline at end of file
+ hate: Double,
+ hate_threatening: Double,
+ self_harm: Double,
+ sexual: Double,
+ sexual_minors: Double,
+ violence: Double,
+ violence_graphic: Double
+)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ResponseStringMarshaller.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ResponseStringMarshaller.scala
index 956e3470..23269997 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ResponseStringMarshaller.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/ResponseStringMarshaller.scala
@@ -1,11 +1,10 @@
package io.cequence.openaiscala.domain.response
-/**
- * Used only for debugging... can be removed later on
- */
+/** Used only for debugging... can be removed later on
+ */
trait ResponseStringMarshaller {
- def fineTuneToString(fileInfo: FineTuneJob) =
+ def fineTuneToString(fileInfo: FineTuneJob): String =
s"""File-tune Job
|-id: ${fileInfo.id}
|-model: ${fileInfo.model}
@@ -13,12 +12,18 @@ trait ResponseStringMarshaller {
|-fine-tune model: ${fileInfo.fine_tuned_model.getOrElse("N/A")}
|-organization id: ${fileInfo.organization_id}
|-status: ${fileInfo.status}
- |-training files:\n${fileInfo.training_files.map(fileInfoToString).mkString("\n")}
- |-validation files:\n${fileInfo.validation_files.map(fileInfoToString).mkString("\n")}
- |-result files:\n${fileInfo.result_files.map(fileInfoToString).mkString("\n")}
- """.stripMargin
-
- def fileInfoToString(fileInfo: FileInfo) =
+ |-training files:\n${fileInfo.training_files
+ .map(fileInfoToString)
+ .mkString("\n")}
+ |-validation files:\n${fileInfo.validation_files
+ .map(fileInfoToString)
+ .mkString("\n")}
+ |-result files:\n${fileInfo.result_files
+ .map(fileInfoToString)
+ .mkString("\n")}
+ """.stripMargin
+
+ def fileInfoToString(fileInfo: FileInfo): String =
s"""File Info
|-id: ${fileInfo.id}
|-filename: ${fileInfo.filename}
@@ -29,40 +34,46 @@ trait ResponseStringMarshaller {
|-purpose: ${fileInfo.purpose}
""".stripMargin
- def imageToString(image: ImageInfo) =
+ def imageToString(image: ImageInfo): String =
s"""Image
|-created: ${image.created.toString}
|-data: ${image.data.map(_.mkString(", ")).mkString("; ")}
""".stripMargin
- def embeddingToString(embedding: EmbeddingResponse) =
+ def embeddingToString(embedding: EmbeddingResponse): String =
s"""Completion
|-model: ${embedding.model}
|-data: ${embedding.data.map(embeddingInfoToString).mkString("\n")}
|-usage: ${usageToString(embedding.usage)},
""".stripMargin
- def embeddingInfoToString(embeddingInfo: EmbeddingInfo) =
+ def embeddingInfoToString(embeddingInfo: EmbeddingInfo): String =
s"""Embedding Info
|-index: ${embeddingInfo.index}
|-embedding: ${embeddingInfo.embedding.mkString(", ")}
""".stripMargin
- def moderationToString(edit: ModerationResponse) =
+ def moderationToString(edit: ModerationResponse): String =
s"""Moderation
|-id: ${edit.id}
|-model: ${edit.model}
|-results: ${edit.results.map(moderationResultToString).mkString("\n")}
""".stripMargin
- def moderationResultToString(moderationResult: ModerationResult) =
+ def moderationResultToString(moderationResult: ModerationResult): String =
s"""Moderation Result
- |-categories: ${moderationCategoriesToString(moderationResult.categories)}
- |-category scores: ${moderationCategoryScoresToString(moderationResult.category_scores)}
+ |-categories: ${moderationCategoriesToString(
+ moderationResult.categories
+ )}
+ |-category scores: ${moderationCategoryScoresToString(
+ moderationResult.category_scores
+ )}
|-flagged: ${moderationResult.flagged}
""".stripMargin
- def moderationCategoriesToString(moderationCategories: ModerationCategories) =
+ def moderationCategoriesToString(
+ moderationCategories: ModerationCategories
+ ): String =
s"""Moderation Categories
|-hate: ${moderationCategories.hate}
|-hate threatening: ${moderationCategories.hate_threatening}
@@ -73,7 +84,9 @@ trait ResponseStringMarshaller {
|-violence_graphic: ${moderationCategories.violence_graphic}
""".stripMargin
- def moderationCategoryScoresToString(moderationCategoryScores: ModerationCategoryScores) =
+ def moderationCategoryScoresToString(
+ moderationCategoryScores: ModerationCategoryScores
+ ): String =
s"""Moderation Category Scores
|-hate: ${moderationCategoryScores.hate}
|-hate threatening: ${moderationCategoryScores.hate_threatening}
@@ -84,30 +97,32 @@ trait ResponseStringMarshaller {
|-violence_graphic: ${moderationCategoryScores.violence_graphic}
""".stripMargin
- def editToString(edit: TextEditResponse) =
+ def editToString(edit: TextEditResponse): String =
s"""Completion
|-created: ${edit.created.toString}
|-usage: ${usageToString(edit.usage)}
|-choices: ${edit.choices.map(editChoiceToString).mkString("\n")}
""".stripMargin
- def editChoiceToString(choice: TextEditChoiceInfo) =
+ def editChoiceToString(choice: TextEditChoiceInfo): String =
s"""Choice
|-index: ${choice.index}
|-text: ${choice.text}
|-logprobs: ${choice.logprobs.map(logprobsToString).getOrElse("N/A")}
""".stripMargin
- def completionToString(completion: TextCompletionResponse) =
+ def completionToString(completion: TextCompletionResponse): String =
s"""Completion
|-id: ${completion.id}
|-model: ${completion.model}
|-created" ${completion.created.toString}
|-usage: ${completion.usage.map(usageToString).getOrElse("N/A")}
- |-choices: ${completion.choices.map(completionChoiceToString).mkString("\n")}
+ |-choices: ${completion.choices
+ .map(completionChoiceToString)
+ .mkString("\n")}
""".stripMargin
- def completionChoiceToString(choice: TextCompletionChoiceInfo) =
+ def completionChoiceToString(choice: TextCompletionChoiceInfo): String =
s"""Choice
|-index: ${choice.index}
|-text: ${choice.text}
@@ -115,24 +130,26 @@ trait ResponseStringMarshaller {
|-finish reason: ${choice.finish_reason}
""".stripMargin
- def logprobsToString(logProb: LogprobsInfo) =
+ def logprobsToString(logProb: LogprobsInfo): String =
s"""Logprobs
|-tokens: ${logProb.tokens.mkString(", ")}
|-token_logprobs: ${logProb.token_logprobs.mkString(", ")}
- |-top_logprobs: ${logProb.top_logprobs.map(_.mkString(",")).mkString("; ")}
+ |-top_logprobs: ${logProb.top_logprobs
+ .map(_.mkString(","))
+ .mkString("; ")}
|-text_offset: ${logProb.text_offset.mkString(", ")}
""".stripMargin
- def usageToString(usage: UsageInfo) =
+ def usageToString(usage: UsageInfo): String =
s"""Usage
|-prompt tokens: ${usage.prompt_tokens}
|-completion tokens: ${usage.completion_tokens.getOrElse("N/A")}
|-total tokens: ${usage.total_tokens}
""".stripMargin
- def usageToString(usage: EmbeddingUsageInfo) =
+ def usageToString(usage: EmbeddingUsageInfo): String =
s"""Usage
|-prompt tokens: ${usage.prompt_tokens}
|-total tokens: ${usage.total_tokens}
""".stripMargin
-}
\ No newline at end of file
+}
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TextCompletionResponse.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TextCompletionResponse.scala
index 8fa110bf..efa25267 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TextCompletionResponse.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TextCompletionResponse.scala
@@ -3,29 +3,29 @@ package io.cequence.openaiscala.domain.response
import java.{util => ju}
case class TextCompletionResponse(
- id: String,
- created: ju.Date,
- model: String,
- choices: Seq[TextCompletionChoiceInfo],
- usage: Option[UsageInfo]
+ id: String,
+ created: ju.Date,
+ model: String,
+ choices: Seq[TextCompletionChoiceInfo],
+ usage: Option[UsageInfo]
)
case class TextCompletionChoiceInfo(
- text: String,
- index: Int,
- logprobs: Option[LogprobsInfo],
- finish_reason: Option[String]
+ text: String,
+ index: Int,
+ logprobs: Option[LogprobsInfo],
+ finish_reason: Option[String]
)
case class UsageInfo(
- prompt_tokens: Int,
- total_tokens: Int,
- completion_tokens: Option[Int]
+ prompt_tokens: Int,
+ total_tokens: Int,
+ completion_tokens: Option[Int]
)
case class LogprobsInfo(
- tokens: Seq[String],
- token_logprobs: Seq[Double],
- top_logprobs: Seq[Map[String, Double]],
- text_offset: Seq[Int]
-)
\ No newline at end of file
+ tokens: Seq[String],
+ token_logprobs: Seq[Double],
+ top_logprobs: Seq[Map[String, Double]],
+ text_offset: Seq[Int]
+)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TextEditResponse.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TextEditResponse.scala
index 47db0bad..000bf075 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TextEditResponse.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TextEditResponse.scala
@@ -3,13 +3,13 @@ package io.cequence.openaiscala.domain.response
import java.{util => ju}
case class TextEditResponse(
- created: ju.Date,
- choices: Seq[TextEditChoiceInfo],
- usage: UsageInfo
+ created: ju.Date,
+ choices: Seq[TextEditChoiceInfo],
+ usage: UsageInfo
)
case class TextEditChoiceInfo(
- text: String,
- index: Int,
- logprobs: Option[LogprobsInfo]
+ text: String,
+ index: Int,
+ logprobs: Option[LogprobsInfo]
)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TranscriptResponse.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TranscriptResponse.scala
index 803ad428..dc2b7c71 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TranscriptResponse.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/response/TranscriptResponse.scala
@@ -1,6 +1,6 @@
package io.cequence.openaiscala.domain.response
case class TranscriptResponse(
- text: String,
- verboseJson: Option[String] = None
-)
\ No newline at end of file
+ text: String,
+ verboseJson: Option[String] = None
+)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateChatCompletionSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateChatCompletionSettings.scala
index e0bcbe3b..e5638cf1 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateChatCompletionSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateChatCompletionSettings.scala
@@ -1,50 +1,50 @@
package io.cequence.openaiscala.domain.settings
case class CreateChatCompletionSettings(
- // ID of the model to use. Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported.
- model: String,
+ // ID of the model to use. Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported.
+ model: String,
- // What sampling temperature to use, between 0 and 2.
- // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- // We generally recommend altering this or top_p but not both. Defaults to 1.
- temperature: Option[Double] = None,
+ // What sampling temperature to use, between 0 and 2.
+ // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+ // We generally recommend altering this or top_p but not both. Defaults to 1.
+ temperature: Option[Double] = None,
- // An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
- // So 0.1 means only the tokens comprising the top 10% probability mass are considered.
- // We generally recommend altering this or temperature but not both. Defaults to 1.
- top_p: Option[Double] = None,
+ // An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ // So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ // We generally recommend altering this or temperature but not both. Defaults to 1.
+ top_p: Option[Double] = None,
- // How many chat completion choices to generate for each input message. Defaults to 1.
- n: Option[Int] = None,
+ // How many chat completion choices to generate for each input message. Defaults to 1.
+ n: Option[Int] = None,
// stream
- // Up to 4 sequences where the API will stop generating further tokens.
- stop: Seq[String] = Nil, // Option[String or Array],
-
- // The maximum number of tokens allowed for the generated answer.
- // By default, the number of tokens the model can return will be (4096 - prompt tokens).
- max_tokens: Option[Int] = None,
-
- // Number between -2.0 and 2.0.
- // Positive values penalize new tokens based on whether they appear in the text so far,
- // increasing the model's likelihood to talk about new topics.
- // Defaults to 0.
- presence_penalty: Option[Double] = None,
-
- // Number between -2.0 and 2.0.
- // Positive values penalize new tokens based on their existing frequency in the text so far,
- // decreasing the model's likelihood to repeat the same line verbatim.
- // Defaults to 0.
- frequency_penalty: Option[Double] = None,
-
- // Modify the likelihood of specified tokens appearing in the completion.
- // Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
- // Mathematically, the bias is added to the logits generated by the model prior to sampling.
- // The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
- // values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
- logit_bias: Map[String, Int] = Map(),
-
- // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
- user: Option[String] = None
+ // Up to 4 sequences where the API will stop generating further tokens.
+ stop: Seq[String] = Nil, // Option[String or Array],
+
+ // The maximum number of tokens allowed for the generated answer.
+ // By default, the number of tokens the model can return will be (4096 - prompt tokens).
+ max_tokens: Option[Int] = None,
+
+ // Number between -2.0 and 2.0.
+ // Positive values penalize new tokens based on whether they appear in the text so far,
+ // increasing the model's likelihood to talk about new topics.
+ // Defaults to 0.
+ presence_penalty: Option[Double] = None,
+
+ // Number between -2.0 and 2.0.
+ // Positive values penalize new tokens based on their existing frequency in the text so far,
+ // decreasing the model's likelihood to repeat the same line verbatim.
+ // Defaults to 0.
+ frequency_penalty: Option[Double] = None,
+
+ // Modify the likelihood of specified tokens appearing in the completion.
+ // Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
+ // Mathematically, the bias is added to the logits generated by the model prior to sampling.
+ // The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
+ // values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+ logit_bias: Map[String, Int] = Map(),
+
+ // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ user: Option[String] = None
)
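
For orientation, a minimal sketch of how these settings are typically constructed (values are illustrative; per the comments above, tune temperature or top_p, but not both):

    import io.cequence.openaiscala.domain.settings.CreateChatCompletionSettings

    val chatSettings = CreateChatCompletionSettings(
      model = "gpt-3.5-turbo",   // a supported model per the comment above
      temperature = Some(0.2),   // lower values: more focused, deterministic output
      max_tokens = Some(512),    // cap on the generated answer
      user = Some("end-user-42") // helps OpenAI monitor and detect abuse
    )
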
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateCompletionSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateCompletionSettings.scala
index 1f7c7dd5..976bb2ea 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateCompletionSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateCompletionSettings.scala
@@ -1,70 +1,70 @@
package io.cequence.openaiscala.domain.settings
case class CreateCompletionSettings(
- // ID of the model to use
- model: String,
+ // ID of the model to use
+ model: String,
- // The suffix that comes after a completion of inserted text.
- suffix: Option[String] = None,
+ // The suffix that comes after a completion of inserted text.
+ suffix: Option[String] = None,
- // The maximum number of tokens to generate in the completion.
- // The token count of your prompt plus max_tokens cannot exceed the model's context length.
- // Most models have a context length of 2048 tokens (except for the newest models, which support 4096). Defaults to 16.
- max_tokens: Option[Int] = None,
+ // The maximum number of tokens to generate in the completion.
+ // The token count of your prompt plus max_tokens cannot exceed the model's context length.
+ // Most models have a context length of 2048 tokens (except for the newest models, which support 4096). Defaults to 16.
+ max_tokens: Option[Int] = None,
- // What sampling temperature to use, between 0 and 2.
- // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- // We generally recommend altering this or top_p but not both. Defaults to 1.
- temperature: Option[Double] = None,
+ // What sampling temperature to use, between 0 and 2.
+ // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+ // We generally recommend altering this or top_p but not both. Defaults to 1.
+ temperature: Option[Double] = None,
- // An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
- // So 0.1 means only the tokens comprising the top 10% probability mass are considered.
- // We generally recommend altering this or temperature but not both. Defaults to 1.
- top_p: Option[Double] = None,
+ // An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ // So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ // We generally recommend altering this or temperature but not both. Defaults to 1.
+ top_p: Option[Double] = None,
- // How many completions to generate for each prompt.
- // Note: Because this parameter generates many completions, it can quickly consume your token quota.
- // Use carefully and ensure that you have reasonable settings for max_tokens and stop. Defaults to 1.
- n: Option[Int] = None,
+ // How many completions to generate for each prompt.
+ // Note: Because this parameter generates many completions, it can quickly consume your token quota.
+ // Use carefully and ensure that you have reasonable settings for max_tokens and stop. Defaults to 1.
+ n: Option[Int] = None,
- // Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens.
- // For example, if logprobs is 5, the API will return a list of the 5 most likely tokens.
- // The API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.
- // The maximum value for logprobs is 5.
- logprobs: Option[Int] = None,
+ // Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens.
+ // For example, if logprobs is 5, the API will return a list of the 5 most likely tokens.
+ // The API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.
+ // The maximum value for logprobs is 5.
+ logprobs: Option[Int] = None,
- // Echo back the prompt in addition to the completion. Defaults to false.
- echo: Option[Boolean] = None,
+ // Echo back the prompt in addition to the completion. Defaults to false.
+ echo: Option[Boolean] = None,
- // Up to 4 sequences where the API will stop generating further tokens.
- // The returned text will not contain the stop sequence.
- stop: Seq[String] = Nil, // Option[String or Array],
+ // Up to 4 sequences where the API will stop generating further tokens.
+ // The returned text will not contain the stop sequence.
+ stop: Seq[String] = Nil, // Option[String or Array],
- // Number between -2.0 and 2.0.
- // Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- // Defaults to 0.
- presence_penalty: Option[Double] = None,
+ // Number between -2.0 and 2.0.
+ // Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+ // Defaults to 0.
+ presence_penalty: Option[Double] = None,
- // Number between -2.0 and 2.0.
- // Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- // Defaults to 0.
- frequency_penalty: Option[Double] = None,
+ // Number between -2.0 and 2.0.
+ // Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+ // Defaults to 0.
+ frequency_penalty: Option[Double] = None,
- // Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token).
- // Results cannot be streamed. When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n.
- // Note: Because this parameter generates many completions, it can quickly consume your token quota.
- // Use carefully and ensure that you have reasonable settings for max_tokens and stop. Defaults to 1.
- best_of: Option[Int] = None,
+ // Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token).
+ // Results cannot be streamed. When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n.
+ // Note: Because this parameter generates many completions, it can quickly consume your token quota.
+ // Use carefully and ensure that you have reasonable settings for max_tokens and stop. Defaults to 1.
+ best_of: Option[Int] = None,
- // Modify the likelihood of specified tokens appearing in the completion.
- // Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100.
- // You can use this tokenizer tool (which works for both GPT-2 and GPT-3) to convert text to token IDs.
- // Mathematically, the bias is added to the logits generated by the model prior to sampling.
- // The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
- // values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
- // As an example, you can pass {"50256": -100} to prevent the <|endoftext|> token from being generated.
- logit_bias: Map[String, Int] = Map(),
+ // Modify the likelihood of specified tokens appearing in the completion.
+ // Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100.
+ // You can use this tokenizer tool (which works for both GPT-2 and GPT-3) to convert text to token IDs.
+ // Mathematically, the bias is added to the logits generated by the model prior to sampling.
+ // The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
+ // values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+ // As an example, you can pass {"50256": -100} to prevent the <|endoftext|> token from being generated.
+ logit_bias: Map[String, Int] = Map(),
- // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
- user: Option[String] = None
+ // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ user: Option[String] = None
)
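
A small sketch showing the logit_bias ban documented above ("50256" is the <|endoftext|> token id); the model id is illustrative:

    import io.cequence.openaiscala.domain.settings.CreateCompletionSettings

    val completionSettings = CreateCompletionSettings(
      model = "text-davinci-003",        // illustrative model id
      max_tokens = Some(64),
      logprobs = Some(5),                // 5 is the documented maximum
      logit_bias = Map("50256" -> -100), // prevent <|endoftext|> from being generated
      stop = Seq("\n\n")                 // up to 4 stop sequences
    )
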
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateEditSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateEditSettings.scala
index 32db5df4..4d133a11 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateEditSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateEditSettings.scala
@@ -1,19 +1,19 @@
package io.cequence.openaiscala.domain.settings
case class CreateEditSettings(
- // ID of the model to use
- model: String,
+ // ID of the model to use
+ model: String,
- // How many edits to generate for the input and instruction. Defaults to 1.
- n: Option[Int] = None,
+ // How many edits to generate for the input and instruction. Defaults to 1.
+ n: Option[Int] = None,
- // What sampling temperature to use. Higher values means the model will take more risks.
- // Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
- // We generally recommend altering this or top_p but not both. Defaults to 1.
- temperature: Option[Double] = None,
+ // What sampling temperature to use. Higher values mean the model will take more risks.
+ // Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+ // We generally recommend altering this or top_p but not both. Defaults to 1.
+ temperature: Option[Double] = None,
- // An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
- // So 0.1 means only the tokens comprising the top 10% probability mass are considered.
- // We generally recommend altering this or temperature but not both. Defaults to 1.
- top_p: Option[Double] = None
+ // An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ // So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ // We generally recommend altering this or temperature but not both. Defaults to 1.
+ top_p: Option[Double] = None
)
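
A minimal sketch following the guidance above (0.9 for creative edits, 0 for argmax); the model id is an assumption:

    import io.cequence.openaiscala.domain.settings.CreateEditSettings

    val editSettings = CreateEditSettings(
      model = "text-davinci-edit-001", // assumed edit-capable model id
      temperature = Some(0.9)          // riskier, more creative edits
    )
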
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateEmbeddingsSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateEmbeddingsSettings.scala
index a63e4f2c..59ba0964 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateEmbeddingsSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateEmbeddingsSettings.scala
@@ -1,9 +1,9 @@
package io.cequence.openaiscala.domain.settings
case class CreateEmbeddingsSettings(
- // ID of the model to use.
- model: String,
+ // ID of the model to use.
+ model: String,
- // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
- user: Option[String] = None
+ // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ user: Option[String] = None
)
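
For completeness, a short sketch (the model id is an assumption):

    import io.cequence.openaiscala.domain.settings.CreateEmbeddingsSettings

    val embeddingsSettings = CreateEmbeddingsSettings(
      model = "text-embedding-ada-002", // assumed embedding model id
      user = Some("end-user-42")
    )
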
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateFineTuneSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateFineTuneSettings.scala
index 80e14cef..23424f14 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateFineTuneSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateFineTuneSettings.scala
@@ -1,60 +1,59 @@
package io.cequence.openaiscala.domain.settings
case class CreateFineTuneSettings(
-
- // The name of the base model to fine-tune.
- // You can select one of "ada", "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21.
- // To learn more about these models, see the Models documentation.
- // Defaults to 'curie'
- model: Option[String] = None,
-
- // The number of epochs to train the model for.
- // An epoch refers to one full cycle through the training dataset.
- // Defaults to 4
- n_epochs: Option[Int] = None,
-
- // The batch size to use for training.
- // The batch size is the number of training examples used to train a single forward and backward pass.
- // By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set,
- // capped at 256 - in general, we've found that larger batch sizes tend to work better for larger datasets.
- batch_size: Option[Int] = None,
-
- // The learning rate multiplier to use for training.
- // The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value.
- // By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final batch_size (larger learning rates tend to perform better with larger batch sizes).
- // We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results.
- learning_rate_multiplier: Option[Double] = None,
-
- // The weight to use for loss on the prompt tokens.
- // This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0),
- // and can add a stabilizing effect to training when completions are short.
- // If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt.
- // Defaults to 0.01
- prompt_loss_weight: Option[Double] = None,
-
- // If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch.
- // These metrics can be viewed in the results file.
- // In order to compute classification metrics, you must provide a validation_file.
- // Additionally, you must specify classification_n_classes for multiclass classification or classification_positive_class for binary classification.
- // Defaults to false
- compute_classification_metrics: Option[Boolean] = None,
-
- // The number of classes in a classification task.
- // This parameter is required for multiclass classification.
- classification_n_classes: Option[Int] = None,
-
- // The positive class in binary classification.
- // This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification.
- classification_positive_class: Option[String] = None,
-
- // If this is provided, we calculate F-beta scores at the specified beta values.
- // The F-beta score is a generalization of F-1 score. This is only used for binary classification.
- // With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight.
- // A larger beta score puts more weight on recall and less on precision.
- // A smaller beta score puts more weight on precision and less on recall.
- classification_betas: Option[Seq[Double]] = None,
-
- // A string of up to 40 characters that will be added to your fine-tuned model name.
- // For example, a suffix of "custom-model-name" would produce a model name like ada:ft-your-org:custom-model-name-2022-02-15-04-21-04.
- suffix: Option[String] = None
-)
\ No newline at end of file
+ // The name of the base model to fine-tune.
+ // You can select one of "ada", "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21.
+ // To learn more about these models, see the Models documentation.
+ // Defaults to 'curie'
+ model: Option[String] = None,
+
+ // The number of epochs to train the model for.
+ // An epoch refers to one full cycle through the training dataset.
+ // Defaults to 4
+ n_epochs: Option[Int] = None,
+
+ // The batch size to use for training.
+ // The batch size is the number of training examples used to train a single forward and backward pass.
+ // By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set,
+ // capped at 256 - in general, we've found that larger batch sizes tend to work better for larger datasets.
+ batch_size: Option[Int] = None,
+
+ // The learning rate multiplier to use for training.
+ // The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value.
+ // By default, the learning rate multiplier is 0.05, 0.1, or 0.2 depending on the final batch_size (larger learning rates tend to perform better with larger batch sizes).
+ // We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results.
+ learning_rate_multiplier: Option[Double] = None,
+
+ // The weight to use for loss on the prompt tokens.
+ // This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0),
+ // and can add a stabilizing effect to training when completions are short.
+ // If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt.
+ // Defaults to 0.01
+ prompt_loss_weight: Option[Double] = None,
+
+ // If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch.
+ // These metrics can be viewed in the results file.
+ // In order to compute classification metrics, you must provide a validation_file.
+ // Additionally, you must specify classification_n_classes for multiclass classification or classification_positive_class for binary classification.
+ // Defaults to false
+ compute_classification_metrics: Option[Boolean] = None,
+
+ // The number of classes in a classification task.
+ // This parameter is required for multiclass classification.
+ classification_n_classes: Option[Int] = None,
+
+ // The positive class in binary classification.
+ // This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification.
+ classification_positive_class: Option[String] = None,
+
+ // If this is provided, we calculate F-beta scores at the specified beta values.
+ // The F-beta score is a generalization of F-1 score. This is only used for binary classification.
+ // With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight.
+ // A larger beta score puts more weight on recall and less on precision.
+ // A smaller beta score puts more weight on precision and less on recall.
+ classification_betas: Option[Seq[Double]] = None,
+
+ // A string of up to 40 characters that will be added to your fine-tuned model name.
+ // For example, a suffix of "custom-model-name" would produce a model name like ada:ft-your-org:custom-model-name-2022-02-15-04-21-04.
+ suffix: Option[String] = None
+)
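
A hedged sketch of a binary-classification fine-tune, combining the constraints documented above (classification metrics require a validation file, and binary tasks need a positive class):

    import io.cequence.openaiscala.domain.settings.CreateFineTuneSettings

    val fineTuneSettings = CreateFineTuneSettings(
      model = Some("ada"),
      n_epochs = Some(4),                          // the documented default
      compute_classification_metrics = Some(true), // requires a validation_file
      classification_positive_class = Some("yes"), // enables precision/recall/F1
      suffix = Some("custom-model-name")           // up to 40 characters
    )
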
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateImageSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateImageSettings.scala
index 1cc1a071..3ae95360 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateImageSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateImageSettings.scala
@@ -3,17 +3,17 @@ package io.cequence.openaiscala.domain.settings
import io.cequence.openaiscala.domain.EnumValue
case class CreateImageSettings(
- // The number of images to generate. Must be between 1 and 10. Defaults to 1
- n: Option[Int] = None,
+ // The number of images to generate. Must be between 1 and 10. Defaults to 1
+ n: Option[Int] = None,
- // The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. Defaults to 1024x1024
- size: Option[ImageSizeType] = None,
+ // The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. Defaults to 1024x1024
+ size: Option[ImageSizeType] = None,
- // The format in which the generated images are returned. Must be one of url or b64_json. Defaults to url
- response_format: Option[ImageResponseFormatType] = None,
+ // The format in which the generated images are returned. Must be one of url or b64_json. Defaults to url
+ response_format: Option[ImageResponseFormatType] = None,
- // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
- user: Option[String] = None
+ // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ user: Option[String] = None
)
sealed abstract class ImageSizeType(value: String) extends EnumValue(value)
@@ -30,4 +30,4 @@ sealed abstract class ImageResponseFormatType extends EnumValue()
object ImageResponseFormatType {
case object url extends ImageResponseFormatType
case object b64_json extends ImageResponseFormatType
-}
\ No newline at end of file
+}
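
A short sketch using only the enum members visible here (url and b64_json); n must stay within 1..10:

    import io.cequence.openaiscala.domain.settings.{CreateImageSettings, ImageResponseFormatType}

    val imageSettings = CreateImageSettings(
      n = Some(2),                                             // between 1 and 10
      response_format = Some(ImageResponseFormatType.b64_json) // the default is url
    )
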
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateModerationSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateModerationSettings.scala
index d50107d1..5d9bd3a1 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateModerationSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateModerationSettings.scala
@@ -1,11 +1,10 @@
package io.cequence.openaiscala.domain.settings
case class CreateModerationSettings(
-
- // Two content moderations models are available: text-moderation-stable and text-moderation-latest.
- // The default is text-moderation-latest which will be automatically upgraded over time.
- // This ensures you are always using our most accurate model.
- // If you use text-moderation-stable, we will provide advanced notice before updating the model.
- // Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.
- model: Option[String] = None
-)
\ No newline at end of file
+ // Two content moderation models are available: text-moderation-stable and text-moderation-latest.
+ // The default is text-moderation-latest which will be automatically upgraded over time.
+ // This ensures you are always using our most accurate model.
+ // If you use text-moderation-stable, we will provide advanced notice before updating the model.
+ // Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.
+ model: Option[String] = None
+)
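
A minimal sketch pinning the stable model; leaving model as None falls back to text-moderation-latest:

    import io.cequence.openaiscala.domain.settings.CreateModerationSettings

    val moderationSettings = CreateModerationSettings(
      model = Some("text-moderation-stable") // slightly lower accuracy, but upgrades are announced in advance
    )
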
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateTranscriptionSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateTranscriptionSettings.scala
index ab18834a..c1b953db 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateTranscriptionSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateTranscriptionSettings.scala
@@ -3,22 +3,22 @@ package io.cequence.openaiscala.domain.settings
import io.cequence.openaiscala.domain.EnumValue
case class CreateTranscriptionSettings(
- // ID of the model to use. Only whisper-1 is currently available.
- model: String,
+ // ID of the model to use. Only whisper-1 is currently available.
+ model: String,
- // The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- // Defaults to json.
- response_format: Option[TranscriptResponseFormatType] = None,
+ // The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
+ // Defaults to json.
+ response_format: Option[TranscriptResponseFormatType] = None,
- // The sampling temperature, between 0 and 1.
- // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- // If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
- // Defaults to 0.
- temperature: Option[Double] = None,
+ // The sampling temperature, between 0 and 1.
+ // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+ // If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ // Defaults to 0.
+ temperature: Option[Double] = None,
- // The language of the input audio.
- // Supplying the input language in ISO-639-1 ('en', 'de', 'es', etc.) format will improve accuracy and latency.
- language: Option[String] = None
+ // The language of the input audio.
+ // Supplying the input language in ISO-639-1 ('en', 'de', 'es', etc.) format will improve accuracy and latency.
+ language: Option[String] = None
)
sealed abstract class TranscriptResponseFormatType extends EnumValue()
@@ -29,4 +29,4 @@ object TranscriptResponseFormatType {
case object srt extends TranscriptResponseFormatType
case object verbose_json extends TranscriptResponseFormatType
case object vtt extends TranscriptResponseFormatType
-}
\ No newline at end of file
+}
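
A sketch combining the documented options (whisper-1 is the only available model; verbose_json is one of the listed output formats):

    import io.cequence.openaiscala.domain.settings.{CreateTranscriptionSettings, TranscriptResponseFormatType}

    val transcriptionSettings = CreateTranscriptionSettings(
      model = "whisper-1",
      response_format = Some(TranscriptResponseFormatType.verbose_json),
      language = Some("en") // ISO-639-1 code improves accuracy and latency
    )
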
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateTranslationSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateTranslationSettings.scala
index 17a86d63..f2f0cb75 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateTranslationSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/CreateTranslationSettings.scala
@@ -1,16 +1,16 @@
package io.cequence.openaiscala.domain.settings
case class CreateTranslationSettings(
- // ID of the model to use. Only whisper-1 is currently available.
- model: String,
+ // ID of the model to use. Only whisper-1 is currently available.
+ model: String,
- // The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- // Defaults to json.
- response_format: Option[TranscriptResponseFormatType] = None,
+ // The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
+ // Defaults to json.
+ response_format: Option[TranscriptResponseFormatType] = None,
- // The sampling temperature, between 0 and 1.
- // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- // If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
- // Defaults to 0.
- temperature: Option[Double] = None
-)
\ No newline at end of file
+ // The sampling temperature, between 0 and 1.
+ // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+ // If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ // Defaults to 0.
+ temperature: Option[Double] = None
+)
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/UploadFileSettings.scala b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/UploadFileSettings.scala
index ffef66f4..24f7cc33 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/UploadFileSettings.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/domain/settings/UploadFileSettings.scala
@@ -1,8 +1,8 @@
package io.cequence.openaiscala.domain.settings
case class UploadFileSettings(
- // The intended purpose of the uploaded documents. Use "fine-tune" for Fine-tuning.
- // This allows us to validate the format of the uploaded file.
- // Note: currently only 'fine-tune' is supported (as of 2023-01-20)
- purpose: String
-)
\ No newline at end of file
+ // The intended purpose of the uploaded documents. Use "fine-tune" for Fine-tuning.
+ // This allows us to validate the format of the uploaded file.
+ // Note: currently only 'fine-tune' is supported (as of 2023-01-20)
+ purpose: String
+)
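
For completeness, the only documented value:

    import io.cequence.openaiscala.domain.settings.UploadFileSettings

    val uploadSettings = UploadFileSettings(
      purpose = "fine-tune" // the only purpose supported as of 2023-01-20
    )
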
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIMultiServiceAdapter.scala b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIMultiServiceAdapter.scala
index 9fd2e192..5d57fb91 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIMultiServiceAdapter.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIMultiServiceAdapter.scala
@@ -11,47 +11,46 @@ private trait OpenAIMultiServiceAdapter extends OpenAIServiceWrapper {
protected def calcIndex: Int
override protected def wrap[T](
- fun: OpenAIService => Future[T]
+ fun: OpenAIService => Future[T]
): Future[T] =
fun(underlyings(calcIndex))
- override def close() =
+ override def close(): Unit =
underlyings.foreach(_.close())
}
private class OpenAIMultiServiceRotationAdapter(
- val underlyings: Seq[OpenAIService]
+ val underlyings: Seq[OpenAIService]
) extends OpenAIMultiServiceAdapter {
private val atomicCounter = new AtomicInteger()
- protected def calcIndex =
+ protected def calcIndex: Int =
atomicCounter.getAndUpdate(index => (index + 1) % count)
}
private class OpenAIMultiServiceRandomAccessAdapter(
- val underlyings: Seq[OpenAIService]
+ val underlyings: Seq[OpenAIService]
) extends OpenAIMultiServiceAdapter {
- protected def calcIndex = Random.nextInt(count)
+ protected def calcIndex: Int = Random.nextInt(count)
}
-/**
- * Load distribution for multiple OpenAIService instances using:
- * - rotation type (aka round robin)
- * - random access/order
- */
+/** Load distribution for multiple OpenAIService instances using:
+ * - rotation type (aka round robin)
+ * - random access/order
+ */
object OpenAIMultiServiceAdapter {
@deprecated("Use ofRoundRobinType instead")
def ofRotationType(underlyings: OpenAIService*): OpenAIService =
- ofRoundRobinType(underlyings:_*)
+ ofRoundRobinType(underlyings: _*)
@deprecated("Use ofRandomOrderType instead")
def ofRandomAccessType(underlyings: OpenAIService*): OpenAIService =
- ofRandomOrderType(underlyings:_*)
+ ofRandomOrderType(underlyings: _*)
def ofRoundRobinType(underlyings: OpenAIService*): OpenAIService =
new OpenAIMultiServiceRotationAdapter(underlyings)
def ofRandomOrderType(underlyings: OpenAIService*): OpenAIService =
new OpenAIMultiServiceRandomAccessAdapter(underlyings)
-}
\ No newline at end of file
+}
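
A hedged sketch of the two factory methods above, assuming two independently configured services (e.g. backed by different API keys) so that successive calls are spread across them:

    import io.cequence.openaiscala.service.{OpenAIMultiServiceAdapter, OpenAIService}

    // Returns a plain OpenAIService; each call is dispatched to one underlying instance.
    def loadBalanced(service1: OpenAIService, service2: OpenAIService): OpenAIService =
      OpenAIMultiServiceAdapter.ofRoundRobinType(service1, service2)

    // ofRandomOrderType(service1, service2) picks an underlying service at random instead.
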
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIRetryServiceAdapter.scala b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIRetryServiceAdapter.scala
index a81d3b64..67c4fbcc 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIRetryServiceAdapter.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIRetryServiceAdapter.scala
@@ -1,19 +1,23 @@
package io.cequence.openaiscala.service
-import io.cequence.openaiscala.{OpenAIScalaTokenCountExceededException, StackWalkerUtil}
+import io.cequence.openaiscala.{
+ OpenAIScalaTokenCountExceededException,
+ StackWalkerUtil
+}
import scala.concurrent.{ExecutionContext, Future}
private class OpenAIRetryServiceAdapter(
- underlying: OpenAIService,
- maxAttempts: Int,
- sleepOnFailureMs: Option[Int] = None,
- log: String => Unit = println)(
- implicit ec: ExecutionContext
+ underlying: OpenAIService,
+ maxAttempts: Int,
+ sleepOnFailureMs: Option[Int] = None,
+ log: String => Unit = println
+)(implicit
+ ec: ExecutionContext
) extends OpenAIServiceWrapper {
override protected def wrap[T](
- fun: OpenAIService => Future[T]
+ fun: OpenAIService => Future[T]
): Future[T] = {
// need to use StackWalker to get the caller function name
val functionName = StackWalkerUtil.functionName(2).get()
@@ -25,9 +29,8 @@ private class OpenAIRetryServiceAdapter(
override def close(): Unit =
underlying.close()
- private def retry[T](
- failureMessage: String)(
- f: => Future[T]
+ private def retry[T](failureMessage: String)(
+ f: => Future[T]
): Future[T] = {
def retryAux(attempt: Int): Future[T] =
f.recoverWith {
@@ -37,7 +40,9 @@ private class OpenAIRetryServiceAdapter(
throw e
case e: Exception =>
if (attempt < maxAttempts) {
- log(s"${failureMessage}. ${e.getMessage}. Attempt ${attempt}. Retrying...")
+ log(
+ s"${failureMessage}. ${e.getMessage}. Attempt ${attempt}. Retrying..."
+ )
sleepOnFailureMs.foreach(
Thread.sleep(_)
@@ -52,14 +57,14 @@ private class OpenAIRetryServiceAdapter(
}
}
-
object OpenAIRetryServiceAdapter {
def apply(
- underlying: OpenAIService,
- maxAttempts: Int,
- sleepOnFailureMs: Option[Int] = None,
- log: String => Unit = println)(
- implicit ec: ExecutionContext
+ underlying: OpenAIService,
+ maxAttempts: Int,
+ sleepOnFailureMs: Option[Int] = None,
+ log: String => Unit = println
+ )(implicit
+ ec: ExecutionContext
): OpenAIService =
new OpenAIRetryServiceAdapter(
underlying,
@@ -67,4 +72,4 @@ object OpenAIRetryServiceAdapter {
sleepOnFailureMs,
log
)
-}
\ No newline at end of file
+}
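
A hedged usage sketch of the companion apply above; note that, per the recovery logic, OpenAIScalaTokenCountExceededException is rethrown immediately rather than retried:

    import io.cequence.openaiscala.service.{OpenAIRetryServiceAdapter, OpenAIService}
    import scala.concurrent.ExecutionContext.Implicits.global

    // Wraps a service so that failed calls are retried up to three times,
    // sleeping one second between attempts.
    def withRetries(underlying: OpenAIService): OpenAIService =
      OpenAIRetryServiceAdapter(
        underlying,
        maxAttempts = 3,
        sleepOnFailureMs = Some(1000)
      )
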
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIService.scala b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIService.scala
index 2c1cde72..c0a84d02 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIService.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIService.scala
@@ -1,374 +1,513 @@
package io.cequence.openaiscala.service
-import io.cequence.openaiscala.domain.{FunMessageSpec, FunctionSpec, MessageSpec}
+import io.cequence.openaiscala.domain.{
+ FunMessageSpec,
+ FunctionSpec,
+ MessageSpec
+}
import io.cequence.openaiscala.domain.settings._
import io.cequence.openaiscala.domain.response._
import java.io.File
import scala.concurrent.Future
-/**
- * Central service to access all public OpenAI WS endpoints as defined at the API ref. page
- *
- * The following services are supported:
- *
- * - '''Models''': listModels, and retrieveModel
- * - '''Completions''': createCompletion
- * - '''Chat Completions''': createChatCompletion, and createChatFunCompletion
- * - '''Edits''': createEdit
- * - '''Images''': createImage, createImageEdit, createImageVariation
- * - '''Embeddings''': createEmbeddings
- * - '''Audio''': createAudioTranscription, and createAudioTranslation
- * - '''Files''': listFiles, uploadFile, deleteFile, retrieveFile, and retrieveFileContent
- * - '''Fine-tunes''': createFineTune, listFineTunes, retrieveFineTune, cancelFineTune, listFineTuneEvents, and deleteFineTuneModel
- * - '''Moderations''': createModeration
- *
- * @since Jan 2023
- */
+/** Central service to access all public OpenAI WS endpoints as defined at the API ref. page
+ *
+ * The following services are supported:
+ *
+ * - '''Models''': listModels, and retrieveModel
+ * - '''Completions''': createCompletion
+ * - '''Chat Completions''': createChatCompletion, and
+ * createChatFunCompletion
+ * - '''Edits''': createEdit
+ * - '''Images''': createImage, createImageEdit, createImageVariation
+ * - '''Embeddings''': createEmbeddings
+ * - '''Audio''': createAudioTranscription, and createAudioTranslation
+ * - '''Files''': listFiles, uploadFile, deleteFile, retrieveFile, and
+ * retrieveFileContent
+ * - '''Fine-tunes''': createFineTune, listFineTunes, retrieveFineTune,
+ * cancelFineTune, listFineTuneEvents, and deleteFineTuneModel
+ * - '''Moderations''': createModeration
+ *
+ * @since Jan 2023
+ */
trait OpenAIService extends OpenAIServiceConsts {
- /**
- * Lists the currently available models, and provides basic information about each one such as the owner and availability.
- *
- * @return models
- *
- * @see OpenAI Doc
- */
+ /** Lists the currently available models, and provides basic information about
+ * each one such as the owner and availability.
+ *
+ * @return
+ * models
+ *
+ * @see
+ * OpenAI Doc
+ */
def listModels: Future[Seq[ModelInfo]]
- /**
- * Retrieves a model instance, providing basic information about the model such as the owner and permissions.
- *
- * @param modelId The ID of the model to use for this request
- * @return model or None if not found
- *
- * @see OpenAI Doc
- */
+ /** Retrieves a model instance, providing basic information about the model
+ * such as the owner and permissions.
+ *
+ * @param modelId
+ * The ID of the model to use for this request
+ * @return
+ * model or None if not found
+ *
+ * @see
+ * OpenAI Doc
+ */
def retrieveModel(modelId: String): Future[Option[ModelInfo]]
- /**
- * Creates a completion for the provided prompt and parameters.
- *
- * @param prompt The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
- Note that <|endoftext|> is the document separator that the model sees during training,
- so if a prompt is not specified the model will generate as if from the beginning of a new document.
- * @param settings
- * @return text completion response
- *
- * @see OpenAI Doc
- */
+ /** Creates a completion for the provided prompt and parameters.
+ *
+ * @param prompt
+ * The prompt(s) to generate completions for, encoded as a string, array of
+ * strings, array of tokens, or array of token arrays. Note that
+ * <|endoftext|> is the document separator that the model sees during
+ * training, so if a prompt is not specified the model will generate as if
+ * from the beginning of a new document.
+ * @param settings
+ * @return
+ * text completion response
+ *
+ * @see
+ * OpenAI Doc
+ */
def createCompletion(
- prompt: String,
- settings: CreateCompletionSettings = DefaultSettings.CreateCompletion
+ prompt: String,
+ settings: CreateCompletionSettings = DefaultSettings.CreateCompletion
): Future[TextCompletionResponse]
- /**
- * Creates a model response for the given chat conversation.
- *
- * @param messages A list of messages comprising the conversation so far.
- * @param settings
- * @return chat completion response
- * @see OpenAI Doc
- */
+ /** Creates a model response for the given chat conversation.
+ *
+ * @param messages
+ * A list of messages comprising the conversation so far.
+ * @param settings
+ * @return
+ * chat completion response
+ * @see
+ * OpenAI Doc
+ */
def createChatCompletion(
- messages: Seq[MessageSpec],
- settings: CreateChatCompletionSettings = DefaultSettings.CreateChatCompletion
+ messages: Seq[MessageSpec],
+ settings: CreateChatCompletionSettings =
+ DefaultSettings.CreateChatCompletion
): Future[ChatCompletionResponse]
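
A hedged call-site sketch for the signature above; MessageSpec is imported at the top of this file, while ChatRole and the MessageSpec(role, content) shape are assumptions here:

    import io.cequence.openaiscala.domain.{ChatRole, MessageSpec}
    import io.cequence.openaiscala.domain.response.ChatCompletionResponse
    import io.cequence.openaiscala.service.OpenAIService
    import scala.concurrent.{ExecutionContext, Future}

    // Sends a one-message conversation using the default settings.
    def greet(service: OpenAIService)(implicit ec: ExecutionContext): Future[ChatCompletionResponse] =
      service.createChatCompletion(
        messages = Seq(MessageSpec(ChatRole.User, "Hello!"))
      )
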
- /**
- * Creates a model response for the given chat conversation expecting a function call.
- *
- * @param messages A list of messages comprising the conversation so far.
- * @param functions A list of functions the model may generate JSON inputs for.
- * @param responseFunctionName If specified it forces the model to respond with a call to that function (must be listed in `functions`).
- * Otherwise, the default "auto" mode is used where the model can pick between an end-user or calling a function.
- * @param settings
- * @return chat completion response
- * @see OpenAI Doc
- */
+ /** Creates a model response for the given chat conversation expecting a
+ * function call.
+ *
+ * @param messages
+ * A list of messages comprising the conversation so far.
+ * @param functions
+ * A list of functions the model may generate JSON inputs for.
+ * @param responseFunctionName
+ * If specified, it forces the model to respond with a call to that
+ * function (must be listed in `functions`). Otherwise, the default "auto"
+ * mode is used, where the model picks between responding to the end-user
+ * or calling a function.
+ * @param settings
+ * @return
+ * chat completion response
+ * @see
+ * OpenAI Doc
+ */
def createChatFunCompletion(
- messages: Seq[FunMessageSpec],
- functions: Seq[FunctionSpec],
- responseFunctionName: Option[String] = None,
- settings: CreateChatCompletionSettings = DefaultSettings.CreateChatFunCompletion
+ messages: Seq[FunMessageSpec],
+ functions: Seq[FunctionSpec],
+ responseFunctionName: Option[String] = None,
+ settings: CreateChatCompletionSettings =
+ DefaultSettings.CreateChatFunCompletion
): Future[ChatFunCompletionResponse]
- /**
- * Creates a new edit for the provided input, instruction, and parameters.
- *
- * @param input The input text to use as a starting point for the edit.
- * @param instruction The instruction that tells the model how to edit the prompt.
- * @param settings
- * @return text edit response
- *
- * @see OpenAI Doc
- */
+ /** Creates a new edit for the provided input, instruction, and parameters.
+ *
+ * @param input
+ * The input text to use as a starting point for the edit.
+ * @param instruction
+ * The instruction that tells the model how to edit the prompt.
+ * @param settings
+ * @return
+ * text edit response
+ *
+ * @see
+ * OpenAI Doc
+ */
def createEdit(
- input: String,
- instruction: String,
- settings: CreateEditSettings = DefaultSettings.CreateEdit
+ input: String,
+ instruction: String,
+ settings: CreateEditSettings = DefaultSettings.CreateEdit
): Future[TextEditResponse]
- /**
- * Creates an image given a prompt.
- *
- * @param prompt A text description of the desired image(s). The maximum length is 1000 characters.
- * @param settings
- * @return image response (might contain multiple data items - one per image)
- *
- * @see OpenAI Doc
- */
+ /** Creates an image given a prompt.
+ *
+ * @param prompt
+ * A text description of the desired image(s). The maximum length is 1000
+ * characters.
+ * @param settings
+ * @return
+ * image response (might contain multiple data items - one per image)
+ *
+ * @see
+ * OpenAI Doc
+ */
def createImage(
- prompt: String,
- settings: CreateImageSettings = DefaultSettings.CreateImage
+ prompt: String,
+ settings: CreateImageSettings = DefaultSettings.CreateImage
): Future[ImageInfo]
- /**
- * Creates an edited or extended image given an original image and a prompt.
- *
- * @param prompt A text description of the desired image(s). The maximum length is 1000 characters.
- * @param image The image to edit. Must be a valid PNG file, less than 4MB, and square.
- * If mask is not provided, image must have transparency, which will be used as the mask.
- * @param mask An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited.
- * Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
- * @param settings
- * @return image response (might contain multiple data items - one per image)
- *
- * @see OpenAI Doc
- */
+ /** Creates an edited or extended image given an original image and a prompt.
+ *
+ * @param prompt
+ * A text description of the desired image(s). The maximum length is 1000
+ * characters.
+ * @param image
+ * The image to edit. Must be a valid PNG file, less than 4MB, and square.
+ * If mask is not provided, image must have transparency, which will be
+ * used as the mask.
+ * @param mask
+ * An additional image whose fully transparent areas (e.g. where alpha is
+ * zero) indicate where image should be edited. Must be a valid PNG file,
+ * less than 4MB, and have the same dimensions as image.
+ * @param settings
+ * @return
+ * image response (might contain multiple data items - one per image)
+ *
+ * @see
+ * OpenAI Doc
+ */
def createImageEdit(
- prompt: String,
- image: File,
- mask: Option[File] = None,
- settings: CreateImageSettings = DefaultSettings.CreateImageEdit
+ prompt: String,
+ image: File,
+ mask: Option[File] = None,
+ settings: CreateImageSettings = DefaultSettings.CreateImageEdit
): Future[ImageInfo]
- /**
- * Creates a variation of a given image.
- *
- * @param image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
- * @param settings
- * @return image response (might contain multiple data items - one per image)
- *
- * @see OpenAI Doc
- */
+ /** Creates a variation of a given image.
+ *
+ * @param image
+ * The image to use as the basis for the variation(s). Must be a valid PNG
+ * file, less than 4MB, and square.
+ * @param settings
+ * @return
+ * image response (might contain multiple data items - one per image)
+ *
+ * @see
+ * OpenAI Doc
+ */
def createImageVariation(
- image: File,
- settings: CreateImageSettings = DefaultSettings.CreateImageVariation
+ image: File,
+ settings: CreateImageSettings = DefaultSettings.CreateImageVariation
): Future[ImageInfo]
- /**
- * Creates an embedding vector representing the input text.
- *
- * @param input Input text to get embeddings for, encoded as a string or array of tokens.
- * To get embeddings for multiple inputs in a single request,
- * pass an array of strings or array of token arrays.
- * Each input must not exceed 8192 tokens in length.
- * @param settings
- * @return list of embeddings inside an envelope
- *
- * @see OpenAI Doc
- */
+ /** Creates an embedding vector representing the input text.
+ *
+ * @param input
+ * Input text to get embeddings for, encoded as a string or array of
+ * tokens. To get embeddings for multiple inputs in a single request, pass
+ * an array of strings or array of token arrays. Each input must not exceed
+ * 8192 tokens in length.
+ * @param settings
+ * @return
+ * list of embeddings inside an envelope
+ *
+ * @see
+ * OpenAI Doc
+ */
def createEmbeddings(
- input: Seq[String],
- settings: CreateEmbeddingsSettings = DefaultSettings.CreateEmbeddings
+ input: Seq[String],
+ settings: CreateEmbeddingsSettings = DefaultSettings.CreateEmbeddings
): Future[EmbeddingResponse]
- /**
- * Transcribes audio into the input language.
- *
- * @param file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param prompt An optional text to guide the model's style or continue a previous audio segment.
- * The prompt should match the audio language.
- * @param settings
- * @return transcription text
- *
- * @see OpenAI Doc
- */
+ /** Transcribes audio into the input language.
+ *
+ * @param file
+ * The audio file to transcribe, in one of these formats: mp3, mp4, mpeg,
+ * mpga, m4a, wav, or webm.
+ * @param prompt
+ * An optional text to guide the model's style or continue a previous audio
+ * segment. The prompt should match the audio language.
+ * @param settings
+ * @return
+ * transcription text
+ *
+ * @see
+ * OpenAI Doc
+ */
def createAudioTranscription(
- file: File,
- prompt: Option[String] = None,
- settings: CreateTranscriptionSettings = DefaultSettings.CreateTranscription
+ file: File,
+ prompt: Option[String] = None,
+ settings: CreateTranscriptionSettings =
+ DefaultSettings.CreateTranscription
): Future[TranscriptResponse]
- /**
- * Translates audio into into English.
- *
- * @param file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param prompt An optional text to guide the model's style or continue a previous audio segment.
- * The prompt should match the audio language.
- * @param settings
- * @return translation text
- *
- * @see OpenAI Doc
- */
+ /** Translates audio into English.
+ *
+ * @param file
+ * The audio file to translate, in one of these formats: mp3, mp4, mpeg,
+ * mpga, m4a, wav, or webm.
+ * @param prompt
+ * An optional text to guide the model's style or continue a previous audio
+ * segment. The prompt should match the audio language.
+ * @param settings
+ * @return
+ * translation text
+ *
+ * @see
+ * OpenAI Doc
+ */
def createAudioTranslation(
- file: File,
- prompt: Option[String] = None,
- settings: CreateTranslationSettings = DefaultSettings.CreateTranslation
+ file: File,
+ prompt: Option[String] = None,
+ settings: CreateTranslationSettings = DefaultSettings.CreateTranslation
): Future[TranscriptResponse]
- /**
- * Returns a list of files that belong to the user's organization.
- *
- * @return file infos
- *
- * @see OpenAI Doc
- */
+ /** Returns a list of files that belong to the user's organization.
+ *
+ * @return
+ * file infos
+ *
+ * @see
+ * OpenAI Doc
+ */
def listFiles: Future[Seq[FileInfo]]
- /**
- * Upload a file that contains document(s) to be used across various endpoints/features.
- * Currently, the size of all the files uploaded by one organization can be up to 1 GB.
- * Please contact us if you need to increase the storage limit.
- *
- * @param file Name of the JSON Lines file to be uploaded. If the purpose is set to "fine-tune",
- * each line is a JSON record with "prompt" and "completion" fields representing your training examples.
- * @param displayFileName (Explicit) display file name; if not specified a full path is used instead.
- * @param settings
- * @return file info
- *
- * @see OpenAI Doc
- */
+ /** Upload a file that contains document(s) to be used across various
+ * endpoints/features. Currently, the size of all the files uploaded by one
+ * organization can be up to 1 GB. Please contact us if you need to increase
+ * the storage limit.
+ *
+ * @param file
+ * Name of the JSON Lines file to be uploaded. If the purpose is set to
+ * "fine-tune", each line is a JSON record with "prompt" and "completion"
+ * fields representing your training examples.
+ * @param displayFileName
+ * (Explicit) display file name; if not specified a full path is used
+ * instead.
+ * @param settings
+ * @return
+ * file info
+ *
+ * @see
+ * OpenAI Doc
+ */
def uploadFile(
- file: File,
- displayFileName: Option[String] = None,
- settings: UploadFileSettings = DefaultSettings.UploadFile
+ file: File,
+ displayFileName: Option[String] = None,
+ settings: UploadFileSettings = DefaultSettings.UploadFile
): Future[FileInfo]
- /**
- * Delete a file.
- *
- * @param fileId The ID of the file to use for this request
- * @return enum indicating whether the file was deleted
- *
- * @see OpenAI Doc
- */
+ /** Delete a file.
+ *
+ * @param fileId
+ * The ID of the file to use for this request
+ * @return
+ * enum indicating whether the file was deleted
+ *
+ * @see
+ * OpenAI Doc
+ */
def deleteFile(
- fileId: String
+ fileId: String
): Future[DeleteResponse]
- /**
- * Returns information about a specific file.
- *
- * @param fileId The ID of the file to use for this request
- * @return file info or None if not found
- *
- * @see OpenAI Doc
- */
+ /** Returns information about a specific file.
+ *
+ * @param fileId
+ * The ID of the file to use for this request
+ * @return
+ * file info or None if not found
+ *
+ * @see
+ * OpenAI Doc
+ */
def retrieveFile(
- fileId: String
+ fileId: String
): Future[Option[FileInfo]]
- /**
- * Returns the contents of the specified file.
- *
- * @param fileId The ID of the file to use for this request
- * @return file content or None if not found
- *
- * @see OpenAI Doc
- */
+ /** Returns the contents of the specified file.
+ *
+ * @param fileId
+ * The ID of the file to use for this request
+ * @return
+ * file content or None if not found
+ *
+ * @see
+ * OpenAI Doc
+ */
def retrieveFileContent(
- fileId: String
+ fileId: String
): Future[Option[String]]
- /**
- * Creates a job that fine-tunes a specified model from a given dataset.
- * Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.
- *
- * @param training_file The ID of an uploaded file that contains training data. See uploadFile for how to upload a file.
- * Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys "prompt" and "completion".
- * Additionally, you must upload your file with the purpose fine-tune.
- * @param validation_file The ID of an uploaded file that contains validation data.
- * If you provide this file, the data is used to generate validation metrics periodically during fine-tuning.
- * These metrics can be viewed in the fine-tuning results file. Your train and validation data should be mutually exclusive.
- * Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys "prompt" and "completion".
- * Additionally, you must upload your file with the purpose fine-tune.
- * @param settings
- * @return fine tune response
- *
- * @see OpenAI API Doc
- * @see OpenAI Fine-Tuning Guide
- */
+ /** Creates a job that fine-tunes a specified model from a given dataset.
+ * Response includes details of the enqueued job including job status and the
+ * name of the fine-tuned models once complete.
+ *
+ * @param training_file
+ * The ID of an uploaded file that contains training data. See
+ * uploadFile for how to upload a file. Your dataset must be
+ * formatted as a JSONL file, where each training example is a JSON object
+ * with the keys "prompt" and "completion". Additionally, you must upload
+ * your file with the purpose fine-tune.
+ * @param validation_file
+ * The ID of an uploaded file that contains validation data. If you provide
+ * this file, the data is used to generate validation metrics periodically
+ * during fine-tuning. These metrics can be viewed in the fine-tuning
+ * results file. Your train and validation data should be mutually
+ * exclusive. Your dataset must be formatted as a JSONL file, where each
+ * validation example is a JSON object with the keys "prompt" and
+ * "completion". Additionally, you must upload your file with the purpose
+ * fine-tune.
+ * @param settings
+ * fine-tune settings, such as the base model and training hyperparameters
+ * @return
+ * fine tune response
+ *
+ * @see
+ * OpenAI API Doc
+ * @see
+ * OpenAI Fine-Tuning Guide
+ */
def createFineTune(
- training_file: String,
- validation_file: Option[String] = None,
- settings: CreateFineTuneSettings = DefaultSettings.CreateFineTune
+ training_file: String,
+ validation_file: Option[String] = None,
+ settings: CreateFineTuneSettings = DefaultSettings.CreateFineTune
): Future[FineTuneJob]
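
The scaladoc above prescribes a two-step flow: upload a JSONL file with purpose `fine-tune` (the `UploadFile` default already sets it), then pass the returned file id as `training_file`. A hedged sketch of that flow; the file path is illustrative:

```scala
import io.cequence.openaiscala.domain.response.FineTuneJob
import io.cequence.openaiscala.service.OpenAIService
import java.io.File
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

// Hedged sketch of the documented two-step flow: the uploaded file's id
// becomes the training_file of the fine-tune job. "train.jsonl" is illustrative.
def fineTune(service: OpenAIService): Future[FineTuneJob] =
  for {
    uploaded <- service.uploadFile(new File("train.jsonl")) // purpose "fine-tune" by default
    job      <- service.createFineTune(training_file = uploaded.id)
  } yield job
```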
- /**
- * List your organization's fine-tuning jobs.
- *
- * @return fine tunes
- *
- * @see OpenAI Doc
- */
+ /** List your organization's fine-tuning jobs.
+ *
+ * @return
+ * fine tunes
+ *
+ * @see
+ * OpenAI Doc
+ */
def listFineTunes: Future[Seq[FineTuneJob]]
- /**
- * Gets info about the fine-tune job.
- *
- * @param fineTuneId The ID of the fine-tune job
- * @return fine tune info
- *
- * @see OpenAI Doc
- */
+ /** Gets info about the fine-tune job.
+ *
+ * @param fineTuneId
+ * The ID of the fine-tune job
+ * @return
+ * fine tune info
+ *
+ * @see
+ * OpenAI Doc
+ */
def retrieveFineTune(
- fineTuneId: String
+ fineTuneId: String
): Future[Option[FineTuneJob]]
- /**
- * Immediately cancel a fine-tune job.
- *
- * @param fineTuneId The ID of the fine-tune job to cancel
- * @return fine tune info or None if not found
- *
- * @see OpenAI Doc
- */
+ /** Immediately cancel a fine-tune job.
+ *
+ * @param fineTuneId
+ * The ID of the fine-tune job to cancel
+ * @return
+ * fine tune info or None if not found
+ *
+ * @see
+ * OpenAI Doc
+ */
def cancelFineTune(
- fineTuneId: String
+ fineTuneId: String
): Future[Option[FineTuneJob]]
- /**
- * Get fine-grained status updates for a fine-tune job.
- *
- * @param fineTuneId The ID of the fine-tune job to get events for.
- * @return fine tune events or None if not found
- *
- * @see OpenAI Doc
- */
+ /** Get fine-grained status updates for a fine-tune job.
+ *
+ * @param fineTuneId
+ * The ID of the fine-tune job to get events for.
+ * @return
+ * fine tune events or None if not found
+ *
+ * @see
+ * OpenAI Doc
+ */
def listFineTuneEvents(
- fineTuneId: String
+ fineTuneId: String
): Future[Option[Seq[FineTuneEvent]]]
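
A short sketch of surfacing those status updates; the `message` field on `FineTuneEvent` is an assumption mirroring the OpenAI API payload:

```scala
import io.cequence.openaiscala.service.OpenAIService
import scala.concurrent.ExecutionContext.Implicits.global

// Hedged sketch: print a job's progress events. The `message` field is an
// assumption about FineTuneEvent, mirroring the OpenAI API payload.
def printEvents(service: OpenAIService, fineTuneId: String): Unit =
  service.listFineTuneEvents(fineTuneId).foreach {
    case Some(events) => events.foreach(event => println(event.message))
    case None         => println(s"$fineTuneId not found")
  }
```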
- /**
- * Delete a fine-tuned model. You must have the Owner role in your organization.
- *
- * @param modelId The ID of the file to use for this request
- * @return enum indicating whether the model was deleted
- *
- * @see OpenAI Doc
- */
+ /** Delete a fine-tuned model. You must have the Owner role in your
+ * organization.
+ *
+ * @param modelId
+ * The ID of the file to use for this request
+ * @return
+ * enum indicating whether the model was deleted
+ *
+ * @see
+ * OpenAI Doc
+ */
def deleteFineTuneModel(
- modelId: String
+ modelId: String
): Future[DeleteResponse]
- /**
- * Classifies if text violates OpenAI's Content Policy.
- *
- * @param input The input text to classify
- * @param settings
- * @return moderation results
- *
- * @see OpenAI Doc
- */
+ /** Classifies if text violates OpenAI's Content Policy.
+ *
+ * @param input
+ * The input text to classify
+ * @param settings
+ * moderation settings, e.g. which moderation model to use
+ * @return
+ * moderation results
+ *
+ * @see
+ * OpenAI Doc
+ */
def createModeration(
- input: String,
- settings: CreateModerationSettings = DefaultSettings.CreateModeration
+ input: String,
+ settings: CreateModerationSettings = DefaultSettings.CreateModeration
): Future[ModerationResponse]
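
A minimal usage sketch with the default moderation settings; the `results` field name on `ModerationResponse` is an assumption mirroring the OpenAI API payload:

```scala
import io.cequence.openaiscala.service.OpenAIService
import scala.concurrent.ExecutionContext.Implicits.global

// Hedged sketch: classify a piece of text with the default settings.
// The `results` field name is an assumption about ModerationResponse.
def moderate(service: OpenAIService, text: String): Unit =
  service.createModeration(text).foreach { response =>
    response.results.foreach(println)
  }
```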
- /**
- * Closes the underlying ws client, and releases all its resources.
- */
+ /** Closes the underlying ws client, and releases all its resources.
+ */
def close(): Unit
-}
\ No newline at end of file
+}
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceConsts.scala b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceConsts.scala
index 65342d26..b444e5c7 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceConsts.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceConsts.scala
@@ -3,16 +3,15 @@ package io.cequence.openaiscala.service
import io.cequence.openaiscala.domain.ModelId
import io.cequence.openaiscala.domain.settings._
-/**
- * Constants of [[OpenAIService]], mostly defaults
- */
+/** Constants of [[OpenAIService]], mostly defaults
+ */
trait OpenAIServiceConsts {
protected val coreUrl = "https://api.openai.com/v1/"
- protected val defaultRequestTimeout = 120 * 1000 // two minute
+ protected val defaultRequestTimeout: Int = 120 * 1000 // two minutes
- protected val defaultReadoutTimeout = 120 * 1000 // two minute
+ protected val defaultReadoutTimeout: Int = 120 * 1000 // two minutes
protected val configPrefix = "openai-scala-client"
@@ -20,57 +19,61 @@ trait OpenAIServiceConsts {
object DefaultSettings {
- val CreateCompletion = CreateCompletionSettings(
+ val CreateCompletion: CreateCompletionSettings = CreateCompletionSettings(
model = ModelId.text_davinci_003,
temperature = Some(0.7),
max_tokens = Some(1000)
)
- val CreateChatCompletion = CreateChatCompletionSettings(
- model = ModelId.gpt_3_5_turbo,
- max_tokens = Some(1000)
- )
+ val CreateChatCompletion: CreateChatCompletionSettings =
+ CreateChatCompletionSettings(
+ model = ModelId.gpt_3_5_turbo,
+ max_tokens = Some(1000)
+ )
- val CreateChatFunCompletion = CreateChatCompletionSettings(
- model = ModelId.gpt_3_5_turbo_0613,
- max_tokens = Some(1000)
- )
+ val CreateChatFunCompletion: CreateChatCompletionSettings =
+ CreateChatCompletionSettings(
+ model = ModelId.gpt_3_5_turbo_0613,
+ max_tokens = Some(1000)
+ )
- val CreateEdit = CreateEditSettings(
+ val CreateEdit: CreateEditSettings = CreateEditSettings(
model = ModelId.text_davinci_edit_001,
temperature = Some(0.7)
)
// keep all OpenAI defaults
- val CreateImage = CreateImageSettings()
+ val CreateImage: CreateImageSettings = CreateImageSettings()
// keep all OpenAI defaults
- val CreateImageEdit = CreateImageSettings()
+ val CreateImageEdit: CreateImageSettings = CreateImageSettings()
// keep all OpenAI defaults
- val CreateImageVariation = CreateImageSettings()
+ val CreateImageVariation: CreateImageSettings = CreateImageSettings()
- val CreateEmbeddings = CreateEmbeddingsSettings(
+ val CreateEmbeddings: CreateEmbeddingsSettings = CreateEmbeddingsSettings(
model = ModelId.text_embedding_ada_002
)
- val CreateTranscription = CreateTranscriptionSettings(
- model = ModelId.whisper_1,
- language = Some("en")
- )
+ val CreateTranscription: CreateTranscriptionSettings =
+ CreateTranscriptionSettings(
+ model = ModelId.whisper_1,
+ language = Some("en")
+ )
- val CreateTranslation = CreateTranslationSettings(
- model = ModelId.whisper_1
- )
+ val CreateTranslation: CreateTranslationSettings =
+ CreateTranslationSettings(
+ model = ModelId.whisper_1
+ )
- val UploadFile = UploadFileSettings(
+ val UploadFile: UploadFileSettings = UploadFileSettings(
purpose = "fine-tune"
)
// keep all OpenAI defaults
- val CreateFineTune = CreateFineTuneSettings()
+ val CreateFineTune: CreateFineTuneSettings = CreateFineTuneSettings()
// keep all OpenAI defaults
- val CreateModeration = CreateModerationSettings()
+ val CreateModeration: CreateModerationSettings = CreateModerationSettings()
}
-}
\ No newline at end of file
+}
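
With explicit types on the defaults, it is also clearer that each one is a plain case class instance; a one-off variation is just a `copy`. A minimal sketch (the temperature value is illustrative):

```scala
import io.cequence.openaiscala.domain.settings.CreateCompletionSettings
import io.cequence.openaiscala.service.OpenAIServiceConsts

// A minimal sketch: DefaultSettings lives inside the OpenAIServiceConsts
// trait, so we mix the trait in to reach it and derive a variant via copy.
object MySettings extends OpenAIServiceConsts {
  val deterministicCompletion: CreateCompletionSettings =
    DefaultSettings.CreateCompletion.copy(temperature = Some(0.0))
}
```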
diff --git a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceWrapper.scala b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceWrapper.scala
index ffbc2130..35e5d83e 100644
--- a/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceWrapper.scala
+++ b/openai-core/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceWrapper.scala
@@ -1,175 +1,198 @@
package io.cequence.openaiscala.service
-import io.cequence.openaiscala.domain.{FunMessageSpec, FunctionSpec, MessageSpec}
+import io.cequence.openaiscala.domain.{
+ FunMessageSpec,
+ FunctionSpec,
+ MessageSpec
+}
import io.cequence.openaiscala.domain.settings._
import java.io.File
-import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.Future
+import io.cequence.openaiscala.domain.response.{
+ ChatCompletionResponse,
+ ChatFunCompletionResponse,
+ DeleteResponse,
+ EmbeddingResponse,
+ FileInfo,
+ FineTuneEvent,
+ FineTuneJob,
+ ImageInfo,
+ ModelInfo,
+ ModerationResponse,
+ TextCompletionResponse,
+ TextEditResponse,
+ TranscriptResponse
+}
trait OpenAIServiceWrapper extends OpenAIService {
- override def listModels = wrap(
+ override def listModels: Future[Seq[ModelInfo]] = wrap(
_.listModels
)
override def retrieveModel(
- modelId: String
- ) = wrap(
+ modelId: String
+ ): Future[Option[ModelInfo]] = wrap(
_.retrieveModel(modelId)
)
override def createCompletion(
- prompt: String,
- settings: CreateCompletionSettings
- ) = wrap(
+ prompt: String,
+ settings: CreateCompletionSettings
+ ): Future[TextCompletionResponse] = wrap(
_.createCompletion(prompt, settings)
)
override def createChatCompletion(
- messages: Seq[MessageSpec],
- settings: CreateChatCompletionSettings
- ) = wrap(
+ messages: Seq[MessageSpec],
+ settings: CreateChatCompletionSettings
+ ): Future[ChatCompletionResponse] = wrap(
_.createChatCompletion(messages, settings)
)
override def createChatFunCompletion(
- messages: Seq[FunMessageSpec],
- functions: Seq[FunctionSpec],
- responseFunctionName: Option[String],
- settings: CreateChatCompletionSettings
- ) = wrap(
- _.createChatFunCompletion(messages, functions, responseFunctionName, settings)
+ messages: Seq[FunMessageSpec],
+ functions: Seq[FunctionSpec],
+ responseFunctionName: Option[String],
+ settings: CreateChatCompletionSettings
+ ): Future[ChatFunCompletionResponse] = wrap(
+ _.createChatFunCompletion(
+ messages,
+ functions,
+ responseFunctionName,
+ settings
+ )
)
override def createEdit(
- input: String,
- instruction: String,
- settings: CreateEditSettings
- ) = wrap(
+ input: String,
+ instruction: String,
+ settings: CreateEditSettings
+ ): Future[TextEditResponse] = wrap(
_.createEdit(input, instruction, settings)
)
override def createImage(
- prompt: String,
- settings: CreateImageSettings
- ) = wrap(
+ prompt: String,
+ settings: CreateImageSettings
+ ): Future[ImageInfo] = wrap(
_.createImage(prompt, settings)
)
override def createImageEdit(
- prompt: String,
- image: File,
- mask: Option[File],
- settings: CreateImageSettings
- ) = wrap(
+ prompt: String,
+ image: File,
+ mask: Option[File],
+ settings: CreateImageSettings
+ ): Future[ImageInfo] = wrap(
_.createImageEdit(prompt, image, mask, settings)
)
override def createImageVariation(
- image: File,
- settings: CreateImageSettings
- ) = wrap(
+ image: File,
+ settings: CreateImageSettings
+ ): Future[ImageInfo] = wrap(
_.createImageVariation(image, settings)
)
override def createEmbeddings(
- input: Seq[String],
- settings: CreateEmbeddingsSettings
- ) = wrap(
+ input: Seq[String],
+ settings: CreateEmbeddingsSettings
+ ): Future[EmbeddingResponse] = wrap(
_.createEmbeddings(input, settings)
)
override def createAudioTranscription(
- file: File,
- prompt: Option[String],
- settings: CreateTranscriptionSettings
- ) = wrap(
+ file: File,
+ prompt: Option[String],
+ settings: CreateTranscriptionSettings
+ ): Future[TranscriptResponse] = wrap(
_.createAudioTranscription(file, prompt, settings)
)
override def createAudioTranslation(
- file: File,
- prompt: Option[String],
- settings: CreateTranslationSettings
- ) = wrap(
+ file: File,
+ prompt: Option[String],
+ settings: CreateTranslationSettings
+ ): Future[TranscriptResponse] = wrap(
_.createAudioTranslation(file, prompt, settings)
)
- override def listFiles = wrap(
+ override def listFiles: Future[Seq[FileInfo]] = wrap(
_.listFiles
)
override def uploadFile(
- file: File,
- displayFileName: Option[String],
- settings: UploadFileSettings
- ) = wrap(
+ file: File,
+ displayFileName: Option[String],
+ settings: UploadFileSettings
+ ): Future[FileInfo] = wrap(
_.uploadFile(file, displayFileName, settings)
)
override def deleteFile(
- fileId: String
- ) = wrap(
+ fileId: String
+ ): Future[DeleteResponse] = wrap(
_.deleteFile(fileId)
)
override def retrieveFile(
- fileId: String
- ) = wrap(
+ fileId: String
+ ): Future[Option[FileInfo]] = wrap(
_.retrieveFile(fileId)
)
override def retrieveFileContent(
- fileId: String
- ) = wrap(
+ fileId: String
+ ): Future[Option[String]] = wrap(
_.retrieveFileContent(fileId)
)
override def createFineTune(
- training_file: String,
- validation_file: Option[String],
- settings: CreateFineTuneSettings
- ) = wrap(
+ training_file: String,
+ validation_file: Option[String],
+ settings: CreateFineTuneSettings
+ ): Future[FineTuneJob] = wrap(
_.createFineTune(training_file, validation_file, settings)
)
- override def listFineTunes = wrap(
+ override def listFineTunes: Future[Seq[FineTuneJob]] = wrap(
_.listFineTunes
)
override def retrieveFineTune(
- fineTuneId: String
- ) = wrap(
+ fineTuneId: String
+ ): Future[Option[FineTuneJob]] = wrap(
_.retrieveFineTune(fineTuneId)
)
override def cancelFineTune(
- fineTuneId: String
- ) = wrap(
+ fineTuneId: String
+ ): Future[Option[FineTuneJob]] = wrap(
_.cancelFineTune(fineTuneId)
)
override def listFineTuneEvents(
- fineTuneId: String
- ) = wrap(
+ fineTuneId: String
+ ): Future[Option[Seq[FineTuneEvent]]] = wrap(
_.listFineTuneEvents(fineTuneId)
)
override def deleteFineTuneModel(
- modelId: String
- ) = wrap(
+ modelId: String
+ ): Future[DeleteResponse] = wrap(
_.deleteFineTuneModel(modelId)
)
override def createModeration(
- input: String,
- settings: CreateModerationSettings
- ) = wrap(
+ input: String,
+ settings: CreateModerationSettings
+ ): Future[ModerationResponse] = wrap(
_.createModeration(input, settings)
)
protected def wrap[T](
- fun: OpenAIService => Future[T]
+ fun: OpenAIService => Future[T]
): Future[T]
-}
\ No newline at end of file
+}
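
Every override above funnels through the single abstract `wrap`, so a concrete decorator only supplies that method plus `close()`, which the wrapper does not intercept. A hedged sketch of a retry-once decorator built on this trait:

```scala
import io.cequence.openaiscala.service.{OpenAIService, OpenAIServiceWrapper}
import scala.concurrent.{ExecutionContext, Future}

// Hedged sketch: a decorator that retries each failed call once. Everything
// except close() is routed through wrap() by OpenAIServiceWrapper.
class RetryOnceOpenAIService(underlying: OpenAIService)(implicit ec: ExecutionContext)
    extends OpenAIServiceWrapper {

  override protected def wrap[T](fun: OpenAIService => Future[T]): Future[T] =
    fun(underlying).recoverWith { case _: Exception => fun(underlying) }

  override def close(): Unit = underlying.close()
}
```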
diff --git a/openai-guice/build.sbt b/openai-guice/build.sbt
index b3b0c865..3f756e6d 100644
--- a/openai-guice/build.sbt
+++ b/openai-guice/build.sbt
@@ -4,4 +4,4 @@ description := "Guice/DI for OpenAI Scala Client"
libraryDependencies ++= Seq(
"net.codingwell" %% "scala-guice" % "5.1.1"
-)
\ No newline at end of file
+)
diff --git a/openai-guice/src/main/scala/io/cequence/openaiscala/service/AkkaModule.scala b/openai-guice/src/main/scala/io/cequence/openaiscala/service/AkkaModule.scala
index e74b409a..d2e8842b 100644
--- a/openai-guice/src/main/scala/io/cequence/openaiscala/service/AkkaModule.scala
+++ b/openai-guice/src/main/scala/io/cequence/openaiscala/service/AkkaModule.scala
@@ -2,7 +2,7 @@ package io.cequence.openaiscala.service
import akka.actor.ActorSystem
import akka.stream.Materializer
-import com.google.inject.{AbstractModule, Injector, Provider}
+import com.google.inject.{AbstractModule, Provider}
import com.typesafe.config.Config
import net.codingwell.scalaguice.ScalaModule
@@ -13,27 +13,40 @@ object Providers {
private val name = "main-actor-system"
- class ActorSystemProvider @Inject()(config: Config) extends Provider[ActorSystem] {
- override def get = ActorSystem(name, config)
+ class ActorSystemProvider @Inject() (config: Config)
+ extends Provider[ActorSystem] {
+ override def get: ActorSystem = ActorSystem(name, config)
}
- class MaterializerProvider @Inject()(system: ActorSystem) extends Provider[Materializer] {
- override def get = Materializer(system)
+ class MaterializerProvider @Inject() (system: ActorSystem)
+ extends Provider[Materializer] {
+ override def get: Materializer = Materializer(system)
}
- class BlockingDispatchedExecutionContextProvider @Inject()(system: ActorSystem) extends Provider[ExecutionContext] {
- override def get: ExecutionContext = system.dispatchers.lookup("blocking-dispatcher")
+ class BlockingDispatchedExecutionContextProvider @Inject() (
+ system: ActorSystem
+ ) extends Provider[ExecutionContext] {
+ override def get: ExecutionContext =
+ system.dispatchers.lookup("blocking-dispatcher")
}
}
-class AkkaModule(includeExecutionContext: Boolean = true) extends AbstractModule with ScalaModule {
+class AkkaModule(includeExecutionContext: Boolean = true)
+ extends AbstractModule
+ with ScalaModule {
- override def configure = {
- bind[ActorSystem].toProvider[Providers.ActorSystemProvider].asEagerSingleton()
- bind[Materializer].toProvider[Providers.MaterializerProvider].asEagerSingleton()
+ override def configure: Unit = {
+ bind[ActorSystem]
+ .toProvider[Providers.ActorSystemProvider]
+ .asEagerSingleton()
+ bind[Materializer]
+ .toProvider[Providers.MaterializerProvider]
+ .asEagerSingleton()
if (includeExecutionContext) {
- bind[ExecutionContext].toProvider[Providers.BlockingDispatchedExecutionContextProvider].asEagerSingleton()
+ bind[ExecutionContext]
+ .toProvider[Providers.BlockingDispatchedExecutionContextProvider]
+ .asEagerSingleton()
}
}
}
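
All three providers are bound as eager singletons, so each injector owns exactly one `ActorSystem`, one `Materializer`, and (optionally) one blocking-pool `ExecutionContext`. A minimal sketch of standing the module up on its own; it assumes the loaded config defines the `blocking-dispatcher` section the provider looks up:

```scala
import akka.actor.ActorSystem
import com.google.inject.Guice
import io.cequence.openaiscala.service.{AkkaModule, ConfigModule}
import net.codingwell.scalaguice.InjectorExtensions._
import scala.concurrent.ExecutionContext

// Hedged sketch: ConfigModule supplies the Config consumed by
// ActorSystemProvider; the ExecutionContext binding additionally requires a
// "blocking-dispatcher" section in that config.
object AkkaModuleDemo extends App {
  private val injector = Guice.createInjector(new ConfigModule(), new AkkaModule())

  implicit val system: ActorSystem = injector.instance[ActorSystem]
  implicit val blockingEc: ExecutionContext = injector.instance[ExecutionContext]

  blockingEc.execute(() => println(Thread.currentThread().getName))
}
```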
diff --git a/openai-guice/src/main/scala/io/cequence/openaiscala/service/BaseOpenAIClientApp.scala b/openai-guice/src/main/scala/io/cequence/openaiscala/service/BaseOpenAIClientApp.scala
index 15449f38..a5e21933 100644
--- a/openai-guice/src/main/scala/io/cequence/openaiscala/service/BaseOpenAIClientApp.scala
+++ b/openai-guice/src/main/scala/io/cequence/openaiscala/service/BaseOpenAIClientApp.scala
@@ -2,23 +2,23 @@ package io.cequence.openaiscala.service
import akka.actor.ActorSystem
import akka.stream.Materializer
-import org.slf4j.LoggerFactory
+import com.google.inject.AbstractModule
import scala.concurrent.ExecutionContext
trait BaseOpenAIClientApp extends GuiceContainer with App {
+ protected val openAIService: OpenAIService = instance[OpenAIService]
// modules
- override protected def modules = Seq(
+ override protected def modules: Seq[AbstractModule] = Seq(
new ConfigModule(),
new AkkaModule(),
new ServiceModule()
)
- protected val openAIService = instance[OpenAIService]
-
// implicits
protected implicit val system: ActorSystem = instance[ActorSystem]
protected implicit val materializer: Materializer = instance[Materializer]
- protected implicit val executionContext: ExecutionContext = materializer.executionContext
-}
\ No newline at end of file
+ protected implicit val executionContext: ExecutionContext =
+ materializer.executionContext
+}
diff --git a/openai-guice/src/main/scala/io/cequence/openaiscala/service/ConfigModule.scala b/openai-guice/src/main/scala/io/cequence/openaiscala/service/ConfigModule.scala
index 1012b4e8..826c74de 100644
--- a/openai-guice/src/main/scala/io/cequence/openaiscala/service/ConfigModule.scala
+++ b/openai-guice/src/main/scala/io/cequence/openaiscala/service/ConfigModule.scala
@@ -7,13 +7,13 @@ import net.codingwell.scalaguice.ScalaModule
object ConfigModule {
class ConfigProvider extends Provider[Config] {
- override def get = ConfigFactory.load()
+ override def get: Config = ConfigFactory.load()
}
}
class ConfigModule extends AbstractModule with ScalaModule {
- override def configure = {
+ override def configure: Unit = {
bind[Config].toProvider[ConfigProvider].asEagerSingleton()
}
-}
\ No newline at end of file
+}
diff --git a/openai-guice/src/main/scala/io/cequence/openaiscala/service/GuiceContainer.scala b/openai-guice/src/main/scala/io/cequence/openaiscala/service/GuiceContainer.scala
index 88baf45a..e49807eb 100644
--- a/openai-guice/src/main/scala/io/cequence/openaiscala/service/GuiceContainer.scala
+++ b/openai-guice/src/main/scala/io/cequence/openaiscala/service/GuiceContainer.scala
@@ -1,25 +1,21 @@
package io.cequence.openaiscala.service
import akka.actor.ActorSystem
-import com.google.inject.{Guice, Injector, Module}
+import com.google.inject.{AbstractModule, Guice, Injector}
import com.typesafe.config.Config
-import net.codingwell.scalaguice.InjectorExtensions._
-import scala.concurrent.duration._
+import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
+import scala.reflect.ClassTag
trait GuiceContainer {
- protected def modules: Seq[Module]
-
- protected lazy val injector: Injector = Guice.createInjector(modules :_*)
+ protected lazy val injector: Injector = Guice.createInjector(modules: _*)
+ protected lazy val config: Config = instance[Config]
- protected lazy val config = instance[Config]
+ protected def modules: Seq[AbstractModule]
- // TODO: for Scala3 this function has to be "inlined"
- protected def instance[T: Manifest] = injector.instance[T]
-
- protected def result[T](future: Future[T]) =
+ protected def result[T](future: Future[T]): T =
Await.result(future, 100.minutes)
protected def terminate: Unit = {
@@ -27,4 +23,8 @@ trait GuiceContainer {
system.terminate
Await.result(system.whenTerminated, 1.day)
}
+
+ protected def instance[T: ClassTag]: T = injector.getInstance(
+ implicitly[ClassTag[T]].runtimeClass.asInstanceOf[Class[T]]
+ )
}
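
The new `instance` resolves through a `ClassTag` and `Injector.getInstance`, which avoids the `Manifest`-on-Scala-3 problem flagged in the removed TODO, but `runtimeClass` erases type parameters, so only non-generic bindings should be looked up this way. A minimal usage sketch:

```scala
import com.google.inject.AbstractModule
import io.cequence.openaiscala.service.{ConfigModule, GuiceContainer}

// Hedged sketch: a container only supplies its modules; the protected
// `config` member is resolved through the new ClassTag-based instance[T].
object DemoContainer extends GuiceContainer {
  override protected def modules: Seq[AbstractModule] = Seq(new ConfigModule())

  def main(args: Array[String]): Unit =
    println(config.origin()) // Config#origin describes where the config was loaded from
}
```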
diff --git a/openai-guice/src/main/scala/io/cequence/openaiscala/service/OpenAIExampleApp.scala b/openai-guice/src/main/scala/io/cequence/openaiscala/service/OpenAIExampleApp.scala
index a7151341..19b9e2ce 100644
--- a/openai-guice/src/main/scala/io/cequence/openaiscala/service/OpenAIExampleApp.scala
+++ b/openai-guice/src/main/scala/io/cequence/openaiscala/service/OpenAIExampleApp.scala
@@ -2,11 +2,13 @@ package io.cequence.openaiscala.service
object OpenAIExampleApp extends BaseOpenAIClientApp {
- openAIService.listModels.map(
- _.foreach(println)
- ).onComplete { _ =>
- openAIService.close()
- system.terminate
- System.exit(0)
- }
+ openAIService.listModels
+ .map(
+ _.foreach(println)
+ )
+ .onComplete { _ =>
+ openAIService.close()
+ system.terminate
+ System.exit(0)
+ }
}
diff --git a/openai-guice/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceProvider.scala b/openai-guice/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceProvider.scala
index bad935d6..0c685d91 100644
--- a/openai-guice/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceProvider.scala
+++ b/openai-guice/src/main/scala/io/cequence/openaiscala/service/OpenAIServiceProvider.scala
@@ -6,10 +6,10 @@ import com.typesafe.config.Config
import javax.inject.{Inject, Provider}
import scala.concurrent.ExecutionContext
-private class OpenAIServiceProvider @Inject()(
- config: Config)(
- implicit ec: ExecutionContext, materializer: Materializer
+private class OpenAIServiceProvider @Inject() (config: Config)(implicit
+ ec: ExecutionContext,
+ materializer: Materializer
) extends Provider[OpenAIService] {
- override def get = OpenAIServiceFactory(config)
-}
\ No newline at end of file
+ override def get: OpenAIService = OpenAIServiceFactory(config)
+}
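
The provider is thin Guice glue around `OpenAIServiceFactory(config)`, which pulls the `ExecutionContext` and `Materializer` from implicit scope. Spelled out without DI, the equivalent wiring is roughly this sketch (the actor system name is illustrative):

```scala
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.typesafe.config.ConfigFactory
import io.cequence.openaiscala.service.{OpenAIService, OpenAIServiceFactory}
import scala.concurrent.ExecutionContext

// Hedged sketch: the same wiring the provider performs, minus Guice.
object ManualWiring extends App {
  implicit val system: ActorSystem = ActorSystem("openai-client")
  implicit val materializer: Materializer = Materializer(system)
  implicit val ec: ExecutionContext = system.dispatcher

  val service: OpenAIService = OpenAIServiceFactory(ConfigFactory.load())
  service.close()
  system.terminate()
}
```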
diff --git a/openai-guice/src/main/scala/io/cequence/openaiscala/service/ServiceModule.scala b/openai-guice/src/main/scala/io/cequence/openaiscala/service/ServiceModule.scala
index f5d57009..616d3ab9 100644
--- a/openai-guice/src/main/scala/io/cequence/openaiscala/service/ServiceModule.scala
+++ b/openai-guice/src/main/scala/io/cequence/openaiscala/service/ServiceModule.scala
@@ -5,7 +5,9 @@ import net.codingwell.scalaguice.ScalaModule
class ServiceModule extends AbstractModule with ScalaModule {
- override def configure = {
- bind[OpenAIService].toProvider(classOf[OpenAIServiceProvider]).asEagerSingleton
+ override def configure: Unit = {
+ bind[OpenAIService]
+ .toProvider(classOf[OpenAIServiceProvider])
+ .asEagerSingleton
}
-}
\ No newline at end of file
+}
diff --git a/project/plugins.sbt b/project/plugins.sbt
index 3135b56d..0f01c415 100755
--- a/project/plugins.sbt
+++ b/project/plugins.sbt
@@ -5,7 +5,6 @@ addSbtPlugin("com.github.sbt" % "sbt-pgp" % "2.1.2")
addSbtPlugin("ch.epfl.scala" % "sbt-scalafix" % "0.11.0")
addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.5.0")
-
// Test Coverage plugin.
// ~
// sbt-scoverage is a plugin for SBT that integrates the scoverage code coverage library.