Skip to content

Commit e066c2a

Browse files
Update README.md
1 parent 27a2c31 commit e066c2a

File tree

1 file changed

+108
-1
lines changed

1 file changed

+108
-1
lines changed

README.md

+108-1
Original file line numberDiff line numberDiff line change
@@ -1267,10 +1267,117 @@ let modelID = "gpt-3.5-turbo-instruct"
12671267
let retrievedModel = try await service.retrieveModelWith(id: modelID)
12681268
```
12691269
```swift
1270-
/// Delete fins tune model
1270+
/// Delete fine tuned model
12711271
let modelID = "fine-tune-model-id"
12721272
let deletionStatus = try await service.deleteFineTuneModelWith(id: modelID)
12731273
```
12741274
### Moderations
1275+
Parameters
1276+
```swift
1277+
/// [Classifies if text violates OpenAI's Content Policy.](https://platform.openai.com/docs/api-reference/moderations/create)
public struct ModerationParameter<Input: Encodable>: Encodable {

   /// The text to classify — a single string or an array of strings, depending on `Input`.
   let input: Input
   /// Raw identifier of the moderation model to use; `nil` lets the API apply its default.
   /// Two content moderation models are available: `text-moderation-stable` and `text-moderation-latest`.
   /// The default is `text-moderation-latest`, which is automatically upgraded over time so you are always
   /// using the most accurate model. With `text-moderation-stable`, advanced notice is provided before the
   /// model is updated; its accuracy may be slightly lower than `text-moderation-latest`.
   let model: String?

   /// Moderation models currently offered by the API.
   enum Model: String {
      case latest = "text-moderation-latest"
      case stable = "text-moderation-stable"
   }

   /// Creates a moderation request.
   /// - Parameters:
   ///   - input: The text (or texts) to classify.
   ///   - model: The moderation model to use; defaults to `nil`, letting the API choose.
   init(input: Input, model: Model? = nil) {
      self.input = input
      self.model = model?.rawValue
   }
}
1299+
```
1300+
Response
1301+
```swift
1302+
/// The [moderation object](https://platform.openai.com/docs/api-reference/moderations/object). Represents policy compliance report by OpenAI's content moderation model against a given input.
public struct ModerationObject: Decodable {

   /// The unique identifier for the moderation request.
   public let id: String
   /// The model used to generate the moderation results.
   public let model: String
   /// A list of moderation objects.
   public let results: [Moderation]

   /// One per-input compliance verdict returned by the API.
   public struct Moderation: Decodable {

      /// Whether the content violates OpenAI's usage policies.
      public let flagged: Bool
      /// A list of the categories, and whether they are flagged or not.
      public let categories: Category<Bool>
      /// A list of the categories along with their scores as predicted by model.
      public let categoryScores: Category<Double>

      /// The fixed set of policy categories; `T` is `Bool` for verdicts and `Double` for scores.
      public struct Category<T: Decodable>: Decodable {

         /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment.
         public let hate: T
         /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
         public let hateThreatening: T
         /// Content that expresses, incites, or promotes harassing language towards any target.
         public let harassment: T
         /// Harassment content that also includes violence or serious harm towards any target.
         public let harassmentThreatening: T
         /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
         public let selfHarm: T
         /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
         public let selfHarmIntent: T
         /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
         public let selfHarmInstructions: T
         /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
         public let sexual: T
         /// Sexual content that includes an individual who is under 18 years old.
         public let sexualMinors: T
         /// Content that depicts death, violence, or physical injury.
         public let violence: T
         /// Content that depicts death, violence, or physical injury in graphic detail.
         public let violenceGraphic: T

         // The API reports sub-categories with slash-delimited keys; map them to camelCase.
         enum CodingKeys: String, CodingKey {
            case hate
            case harassment
            case sexual
            case violence
            case hateThreatening = "hate/threatening"
            case harassmentThreatening = "harassment/threatening"
            case selfHarm = "self-harm"
            case selfHarmIntent = "self-harm/intent"
            case selfHarmInstructions = "self-harm/instructions"
            case sexualMinors = "sexual/minors"
            case violenceGraphic = "violence/graphic"
         }
      }

      // Only `category_scores` needs renaming; the rest match their JSON keys.
      enum CodingKeys: String, CodingKey {
         case flagged
         case categories
         case categoryScores = "category_scores"
      }
   }
}
1368+
```
1369+
Usage
1370+
```swift
1371+
/// Single prompt
1372+
let prompt = "I am going to kill him"
1373+
let parameters = ModerationParameter(input: prompt)
1374+
let isFlagged = try await service.createModerationFromText(parameters: parameters)
1375+
```
1376+
```swift
1377+
/// Multiple prompts
1378+
let prompts = ["I am going to kill him", "I am going to die"]
1379+
let parameters = ModerationParameter(input: prompts)
1380+
let isFlagged = try await service.createModerationFromTexts(parameters: parameters)
1381+
```
12751382

12761383

0 commit comments

Comments
 (0)