diff --git a/Demo/DemoChat/Sources/MiscStore.swift b/Demo/DemoChat/Sources/MiscStore.swift
index 4693e423..6829744d 100644
--- a/Demo/DemoChat/Sources/MiscStore.swift
+++ b/Demo/DemoChat/Sources/MiscStore.swift
@@ -63,30 +63,24 @@ public final class MiscStore: ObservableObject {
             func circleEmoji(for resultType: Bool) -> String {
                 resultType ? "🔴" : "🟢"
             }
-
-            for result in categoryResults {
-                let content = """
-                \(circleEmoji(for: result.categories.hate)) Hate
-                \(circleEmoji(for: result.categories.hateThreatening)) Hate/Threatening
-                \(circleEmoji(for: result.categories.selfHarm)) Self-harm
-                \(circleEmoji(for: result.categories.sexual)) Sexual
-                \(circleEmoji(for: result.categories.sexualMinors)) Sexual/Minors
-                \(circleEmoji(for: result.categories.violence)) Violence
-                \(circleEmoji(for: result.categories.violenceGraphic)) Violence/Graphic
-                """
-
+
+            categoryResults.forEach { categoryResult in
+                let content = categoryResult.categories.map { (label, value) in
+                    return "\(circleEmoji(for: value)) \(label)"
+                }
+
                 let message = Message(
                     id: response.id,
                     role: .assistant,
-                    content: content,
+                    content: content.joined(separator: "\n"),
                     createdAt: message.createdAt)
-
+
                 if existingMessages.contains(message) {
-                    continue
+                    return
                 }
                 moderationConversation.messages.append(message)
             }
-
+
         } catch {
             moderationConversationError = error
         }
diff --git a/Sources/OpenAI/OpenAI.swift b/Sources/OpenAI/OpenAI.swift
index 49715c8e..199e3eb3 100644
--- a/Sources/OpenAI/OpenAI.swift
+++ b/Sources/OpenAI/OpenAI.swift
@@ -100,6 +100,7 @@ final public class OpenAI: OpenAIProtocol {
         performRequest(request: JSONRequest<ModelsResult>(url: buildURL(path: .models), method: "GET"), completion: completion)
     }
     
+    @available(iOS 13.0, *)
     public func moderations(query: ModerationsQuery, completion: @escaping (Result<ModerationsResult, Error>) -> Void) {
         performRequest(request: JSONRequest<ModerationsResult>(body: query, url: buildURL(path: .moderations)), completion: completion)
     }
diff --git a/Sources/OpenAI/Public/Models/ModerationsResult.swift b/Sources/OpenAI/Public/Models/ModerationsResult.swift
index 924baebd..a7c25aef 100644
--- a/Sources/OpenAI/Public/Models/ModerationsResult.swift
+++ b/Sources/OpenAI/Public/Models/ModerationsResult.swift
@@ -7,17 +7,27 @@
 
 import Foundation
 
+@available(iOS 13.0, *)
 public struct ModerationsResult: Codable, Equatable {
 
-    public struct CategoryResult: Codable, Equatable {
+    public struct Moderation: Codable, Equatable {
 
-        public struct Categories: Codable, Equatable {
+        public struct Categories: Codable, Equatable, Sequence {
+
+            /// Content that expresses, incites, or promotes harassing language towards any target.
+            public let harassment: Bool
+            /// Harassment content that also includes violence or serious harm towards any target.
+            public let harassmentThreatening: Bool
             /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
             public let hate: Bool
             /// Hateful content that also includes violence or serious harm towards the targeted group.
             public let hateThreatening: Bool
             /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
             public let selfHarm: Bool
+            /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
+            public let selfHarmIntent: Bool
+            /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
+            public let selfHarmInstructions: Bool
             /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
             public let sexual: Bool
             /// Sexual content that includes an individual who is under 18 years old.
@@ -26,25 +36,44 @@ public struct ModerationsResult: Codable, Equatable {
             public let violence: Bool
             /// Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
             public let violenceGraphic: Bool
-
-            enum CodingKeys: String, CodingKey {
+
+            public enum CodingKeys: String, CodingKey, CaseIterable {
+                case harassment
+                case harassmentThreatening = "harassment/threatening"
                 case hate
                 case hateThreatening = "hate/threatening"
                 case selfHarm = "self-harm"
+                case selfHarmIntent = "self-harm/intent"
+                case selfHarmInstructions = "self-harm/instructions"
                 case sexual
                 case sexualMinors = "sexual/minors"
                 case violence
                 case violenceGraphic = "violence/graphic"
             }
+
+            public func makeIterator() -> IndexingIterator<[(String, Bool)]> {
+                return Mirror(reflecting: self).children.enumerated().map { (index, element) in
+                    return (CodingKeys.allCases[index].stringValue, element.value) as! (String, Bool)
+                }.makeIterator()
+            }
         }
-
-        public struct CategoryScores: Codable, Equatable {
+
+        public struct CategoryScores: Codable, Equatable, Sequence {
+
+            /// Content that expresses, incites, or promotes harassing language towards any target.
+            public let harassment: Double
+            /// Harassment content that also includes violence or serious harm towards any target.
+            public let harassmentThreatening: Double
             /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
             public let hate: Double
             /// Hateful content that also includes violence or serious harm towards the targeted group.
             public let hateThreatening: Double
             /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
             public let selfHarm: Double
+            /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
+            public let selfHarmIntent: Double
+            /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
+            public let selfHarmInstructions: Double
             /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
             public let sexual: Double
             /// Sexual content that includes an individual who is under 18 years old.
@@ -53,33 +82,46 @@ public struct ModerationsResult: Codable, Equatable {
             public let violence: Double
             /// Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
             public let violenceGraphic: Double
-
-            enum CodingKeys: String, CodingKey {
+
+            public enum CodingKeys: String, CodingKey, CaseIterable {
+                case harassment
+                case harassmentThreatening = "harassment/threatening"
                 case hate
                 case hateThreatening = "hate/threatening"
                 case selfHarm = "self-harm"
+                case selfHarmIntent = "self-harm/intent"
+                case selfHarmInstructions = "self-harm/instructions"
                 case sexual
                 case sexualMinors = "sexual/minors"
                 case violence
                 case violenceGraphic = "violence/graphic"
             }
+
+            public func makeIterator() -> IndexingIterator<[(String, Double)]> {
+                return Mirror(reflecting: self).children.enumerated().map { (index, element) in
+                    return (CodingKeys.allCases[index].stringValue, element.value) as! (String, Double)
+                }.makeIterator()
+            }
         }
-
+
         /// Collection of per-category binary usage policies violation flags. For each category, the value is true if the model flags the corresponding category as violated, false otherwise.
         public let categories: Categories
        /// Collection of per-category raw scores output by the model, denoting the model's confidence that the input violates the OpenAI's policy for the category. The value is between 0 and 1, where higher values denote higher confidence. The scores should not be interpreted as probabilities.
         public let categoryScores: CategoryScores
         /// True if the model classifies the content as violating OpenAI's usage policies, false otherwise.
         public let flagged: Bool
-
+
         enum CodingKeys: String, CodingKey {
             case categories
             case categoryScores = "category_scores"
             case flagged
         }
     }
-
+
     public let id: String
     public let model: Model
-    public let results: [CategoryResult]
+    public let results: [Self.Moderation]
 }
+
+@available(iOS 13.0, *)
+extension ModerationsResult: Identifiable {}
diff --git a/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift b/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift
index 8c65b190..e7cb1aa0 100644
--- a/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift
+++ b/Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift
@@ -211,6 +211,7 @@ public protocol OpenAIProtocol {
        - query: A `ModerationsQuery` object containing the input parameters for the API request. This includes the input text and optionally the model to be used.
        - completion: A closure which receives the result when the API request finishes. The closure's parameter, `Result<ModerationsResult, Error>`, will contain either the `ModerationsResult` object with the list of category results, or an error if the request failed.
     **/
+    @available(iOS 13.0, *)
     func moderations(query: ModerationsQuery, completion: @escaping (Result<ModerationsResult, Error>) -> Void)
 
     /**
diff --git a/Tests/OpenAITests/OpenAITests.swift b/Tests/OpenAITests/OpenAITests.swift
index 7285dd16..b98c3caa 100644
--- a/Tests/OpenAITests/OpenAITests.swift
+++ b/Tests/OpenAITests/OpenAITests.swift
@@ -245,8 +245,8 @@ class OpenAITests: XCTestCase {
     func testModerations() async throws {
         let query = ModerationsQuery(input: "Hello, world!")
         let moderationsResult = ModerationsResult(id: "foo", model: .moderation, results: [
-            .init(categories: .init(hate: false, hateThreatening: false, selfHarm: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false),
-                  categoryScores: .init(hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1),
+            .init(categories: .init(harassment: false, harassmentThreatening: false, hate: false, hateThreatening: false, selfHarm: false, selfHarmIntent: false, selfHarmInstructions: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false),
+                  categoryScores: .init(harassment: 0.1, harassmentThreatening: 0.1, hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, selfHarmIntent: 0.1, selfHarmInstructions: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1),
                   flagged: false)
         ])
         try self.stub(result: moderationsResult)
@@ -254,7 +254,21 @@ class OpenAITests: XCTestCase {
         let result = try await openAI.moderations(query: query)
         XCTAssertEqual(result, moderationsResult)
     }
-
+
+    func testModerationsIterable() {
+        let categories = ModerationsResult.Moderation.Categories(harassment: false, harassmentThreatening: false, hate: false, hateThreatening: false, selfHarm: false, selfHarmIntent: false, selfHarmInstructions: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false)
+        Mirror(reflecting: categories).children.enumerated().forEach { index, element in
+            let label = ModerationsResult.Moderation.Categories.CodingKeys.allCases[index].stringValue.replacing(try! Regex("[/-]"), with: { _ in "" })
+            XCTAssertEqual(label, element.label!.lowercased())
+        }
+
+        let categoryScores = ModerationsResult.Moderation.CategoryScores(harassment: 0.1, harassmentThreatening: 0.1, hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, selfHarmIntent: 0.1, selfHarmInstructions: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1)
+        Mirror(reflecting: categoryScores).children.enumerated().forEach { index, element in
+            let label = ModerationsResult.Moderation.CategoryScores.CodingKeys.allCases[index].stringValue.replacing(try! Regex("[/-]"), with: { _ in "" })
+            XCTAssertEqual(label, element.label!.lowercased())
+        }
+    }
+
     func testModerationsError() async throws {
         let query = ModerationsQuery(input: "Hello, world!")
         let inError = APIError(message: "foo", type: "bar", param: "baz", code: "100")
diff --git a/Tests/OpenAITests/OpenAITestsCombine.swift b/Tests/OpenAITests/OpenAITestsCombine.swift
index b7918b44..e49ab3d7 100644
--- a/Tests/OpenAITests/OpenAITestsCombine.swift
+++ b/Tests/OpenAITests/OpenAITestsCombine.swift
@@ -94,8 +94,8 @@ final class OpenAITestsCombine: XCTestCase {
     func testModerations() throws {
         let query = ModerationsQuery(input: "Hello, world!")
         let moderationsResult = ModerationsResult(id: "foo", model: .moderation, results: [
-            .init(categories: .init(hate: false, hateThreatening: false, selfHarm: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false),
-                  categoryScores: .init(hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1),
+            .init(categories: .init(harassment: false, harassmentThreatening: false, hate: false, hateThreatening: false, selfHarm: false, selfHarmIntent: false, selfHarmInstructions: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false),
+                  categoryScores: .init(harassment: 0.1, harassmentThreatening: 0.1, hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, selfHarmIntent: 0.1, selfHarmInstructions: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1),
                   flagged: false)
         ])
         try self.stub(result: moderationsResult)
diff --git a/Tests/OpenAITests/OpenAITestsDecoder.swift b/Tests/OpenAITests/OpenAITestsDecoder.swift
index d9672c04..ece078eb 100644
--- a/Tests/OpenAITests/OpenAITestsDecoder.swift
+++ b/Tests/OpenAITests/OpenAITestsDecoder.swift
@@ -364,18 +364,26 @@ class OpenAITestsDecoder: XCTestCase {
           "results": [
             {
               "categories": {
+                "harassment": false,
+                "harassment/threatening": false,
                 "hate": false,
                 "hate/threatening": true,
                 "self-harm": false,
+                "self-harm/intent": false,
+                "self-harm/instructions": false,
                 "sexual": false,
                 "sexual/minors": false,
                 "violence": true,
                 "violence/graphic": false
               },
               "category_scores": {
+                "harassment": 0.0431830403405153,
+                "harassment/threatening": 0.1229622494034651,
                 "hate": 0.22714105248451233,
                 "hate/threatening": 0.4132447838783264,
                 "self-harm": 0.00523239187896251,
+                "self-harm/intent": 0.307237106114835,
+                "self-harm/instructions": 0.42189350703096,
                 "sexual": 0.01407341007143259,
                 "sexual/minors": 0.0038522258400917053,
                 "violence": 0.9223177433013916,
@@ -388,8 +396,8 @@ class OpenAITestsDecoder: XCTestCase {
         """
 
         let expectedValue = ModerationsResult(id: "modr-5MWoLO", model: .moderation, results: [
-            .init(categories: .init(hate: false, hateThreatening: true, selfHarm: false, sexual: false, sexualMinors: false, violence: true, violenceGraphic: false),
-                  categoryScores: .init(hate: 0.22714105248451233, hateThreatening: 0.4132447838783264, selfHarm: 0.00523239187896251, sexual: 0.01407341007143259, sexualMinors: 0.0038522258400917053, violence: 0.9223177433013916, violenceGraphic: 0.036865197122097015),
+            .init(categories: .init(harassment: false, harassmentThreatening: false, hate: false, hateThreatening: true, selfHarm: false, selfHarmIntent: false, selfHarmInstructions: false, sexual: false, sexualMinors: false, violence: true, violenceGraphic: false),
+                  categoryScores: .init(harassment: 0.0431830403405153, harassmentThreatening: 0.1229622494034651, hate: 0.22714105248451233, hateThreatening: 0.4132447838783264, selfHarm: 0.00523239187896251, selfHarmIntent: 0.307237106114835, selfHarmInstructions: 0.42189350703096, sexual: 0.01407341007143259, sexualMinors: 0.0038522258400917053, violence: 0.9223177433013916, violenceGraphic: 0.036865197122097015),
                   flagged: true)
         ])
         try decode(data, expectedValue)
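
Usage note (not part of the diff): a minimal sketch of how a caller might consume the new `Sequence` conformance on `ModerationsResult.Moderation.Categories`, mirroring the updated demo code above. The helper name `printModerationReport`, the placeholder API token, and the emoji formatting are illustrative assumptions, not part of the library.

```swift
import OpenAI

/// Requests a moderation for `input` and prints one line per category,
/// using the (label, flagged) pairs produced by Categories' makeIterator().
func printModerationReport(for input: String) async throws {
    let openAI = OpenAI(apiToken: "YOUR_TOKEN") // placeholder token

    let result = try await openAI.moderations(query: ModerationsQuery(input: input))

    for moderation in result.results {
        // Each element is e.g. ("harassment", false) or ("hate/threatening", true).
        let report = moderation.categories
            .map { label, flagged in "\(flagged ? "🔴" : "🟢") \(label)" }
            .joined(separator: "\n")
        print(report)
    }
}
```

Because the per-category flags are exposed as a sequence of labeled values, callers no longer need to enumerate each property by hand, so newly added categories (such as `harassment` and `self-harm/intent`) show up in such a report without further client changes.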