Merge pull request #162 from kalafus/MacPaw.moderations_result_updates
update moderations results keywords, abstract display of moderations keywords
ingvarus-bc committed Feb 21, 2024
2 parents 224f4ef + 4b69b8e commit edf8830
Showing 7 changed files with 95 additions and 35 deletions.
26 changes: 10 additions & 16 deletions Demo/DemoChat/Sources/MiscStore.swift
@@ -63,30 +63,24 @@ public final class MiscStore: ObservableObject {
func circleEmoji(for resultType: Bool) -> String {
resultType ? "🔴" : "🟢"
}

- for result in categoryResults {
- let content = """
- \(circleEmoji(for: result.categories.hate)) Hate
- \(circleEmoji(for: result.categories.hateThreatening)) Hate/Threatening
- \(circleEmoji(for: result.categories.selfHarm)) Self-harm
- \(circleEmoji(for: result.categories.sexual)) Sexual
- \(circleEmoji(for: result.categories.sexualMinors)) Sexual/Minors
- \(circleEmoji(for: result.categories.violence)) Violence
- \(circleEmoji(for: result.categories.violenceGraphic)) Violence/Graphic
- """
+ categoryResults.forEach { categoryResult in
+ let content = categoryResult.categories.map { (label, value) in
+ return "\(circleEmoji(for: value)) \(label)"
+ }

let message = Message(
id: response.id,
role: .assistant,
- content: content,
+ content: content.joined(separator: "\n"),
createdAt: message.createdAt)

if existingMessages.contains(message) {
- continue
+ return
}
moderationConversation.messages.append(message)
}

} catch {
moderationConversationError = error
}
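The demo change above is the "abstract display of moderations keywords" from the commit message: instead of hard-coding one line per category, the view now builds its text from the (label, value) pairs yielded by the new Sequence conformance on Categories (introduced below). A minimal standalone sketch of the same idea; renderFlags is a hypothetical helper, not part of this commit:

    func renderFlags(_ categories: ModerationsResult.Moderation.Categories) -> String {
        categories
            .map { (label, flagged) in "\(flagged ? "🔴" : "🟢") \(label)" }
            .joined(separator: "\n")
    }

Categories added to the API in future versions would then surface in the demo with no further UI changes.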
1 change: 1 addition & 0 deletions Sources/OpenAI/OpenAI.swift
@@ -100,6 +100,7 @@ final public class OpenAI: OpenAIProtocol {
performRequest(request: JSONRequest<ModelsResult>(url: buildURL(path: .models), method: "GET"), completion: completion)
}

+ @available(iOS 13.0, *)
public func moderations(query: ModerationsQuery, completion: @escaping (Result<ModerationsResult, Error>) -> Void) {
performRequest(request: JSONRequest<ModerationsResult>(body: query, url: buildURL(path: .moderations)), completion: completion)
}
66 changes: 54 additions & 12 deletions Sources/OpenAI/Public/Models/ModerationsResult.swift
@@ -7,17 +7,27 @@

import Foundation

+ @available(iOS 13.0, *)
public struct ModerationsResult: Codable, Equatable {

- public struct CategoryResult: Codable, Equatable {
+ public struct Moderation: Codable, Equatable {

- public struct Categories: Codable, Equatable {
+ public struct Categories: Codable, Equatable, Sequence {

+ /// Content that expresses, incites, or promotes harassing language towards any target.
+ public let harassment: Bool
+ /// Harassment content that also includes violence or serious harm towards any target.
+ public let harassmentThreatening: Bool
/// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
public let hate: Bool
/// Hateful content that also includes violence or serious harm towards the targeted group.
public let hateThreatening: Bool
/// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
public let selfHarm: Bool
+ /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
+ public let selfHarmIntent: Bool
+ /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
+ public let selfHarmInstructions: Bool
/// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
public let sexual: Bool
/// Sexual content that includes an individual who is under 18 years old.
@@ -26,25 +36,44 @@ public struct ModerationsResult: Codable, Equatable {
public let violence: Bool
/// Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
public let violenceGraphic: Bool

- enum CodingKeys: String, CodingKey {
+ public enum CodingKeys: String, CodingKey, CaseIterable {
+ case harassment
+ case harassmentThreatening = "harassment/threatening"
case hate
case hateThreatening = "hate/threatening"
case selfHarm = "self-harm"
+ case selfHarmIntent = "self-harm/intent"
+ case selfHarmInstructions = "self-harm/instructions"
case sexual
case sexualMinors = "sexual/minors"
case violence
case violenceGraphic = "violence/graphic"
}

+ public func makeIterator() -> IndexingIterator<[(String, Bool)]> {
+ return Mirror(reflecting: self).children.enumerated().map { (index, element) in
+ return (CodingKeys.allCases[index].stringValue, element.value) as! (String, Bool)
+ }.makeIterator()
+ }
}

- public struct CategoryScores: Codable, Equatable {
+ public struct CategoryScores: Codable, Equatable, Sequence {

+ /// Content that expresses, incites, or promotes harassing language towards any target.
+ public let harassment: Double
+ /// Harassment content that also includes violence or serious harm towards any target.
+ public let harassmentThreatening: Double
/// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
public let hate: Double
/// Hateful content that also includes violence or serious harm towards the targeted group.
public let hateThreatening: Double
/// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
public let selfHarm: Double
+ /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
+ public let selfHarmIntent: Double
+ /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
+ public let selfHarmInstructions: Double
/// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
public let sexual: Double
/// Sexual content that includes an individual who is under 18 years old.
@@ -53,33 +82,46 @@ public struct ModerationsResult: Codable, Equatable {
public let violence: Double
/// Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
public let violenceGraphic: Double

- enum CodingKeys: String, CodingKey {
+ public enum CodingKeys: String, CodingKey, CaseIterable {
+ case harassment
+ case harassmentThreatening = "harassment/threatening"
case hate
case hateThreatening = "hate/threatening"
case selfHarm = "self-harm"
+ case selfHarmIntent = "self-harm/intent"
+ case selfHarmInstructions = "self-harm/instructions"
case sexual
case sexualMinors = "sexual/minors"
case violence
case violenceGraphic = "violence/graphic"
}

+ public func makeIterator() -> IndexingIterator<[(String, Double)]> {
+ return Mirror(reflecting: self).children.enumerated().map { (index, element) in
+ return (CodingKeys.allCases[index].stringValue, element.value) as! (String, Double)
+ }.makeIterator()
+ }
}

/// Collection of per-category binary usage policies violation flags. For each category, the value is true if the model flags the corresponding category as violated, false otherwise.
public let categories: Categories
/// Collection of per-category raw scores output by the model, denoting the model's confidence that the input violates the OpenAI's policy for the category. The value is between 0 and 1, where higher values denote higher confidence. The scores should not be interpreted as probabilities.
public let categoryScores: CategoryScores
/// True if the model classifies the content as violating OpenAI's usage policies, false otherwise.
public let flagged: Bool

enum CodingKeys: String, CodingKey {
case categories
case categoryScores = "category_scores"
case flagged
}
}

public let id: String
public let model: Model
- public let results: [CategoryResult]
+ public let results: [Self.Moderation]
}

+ @available(iOS 13.0, *)
extension ModerationsResult: Identifiable {}
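A caveat on the Sequence conformances above: makeIterator pairs Mirror(reflecting:)'s children with CodingKeys.allCases by position, so the declaration order of the stored properties must continue to match the order of the coding keys; reordering either one would silently mislabel values (the new testModerationsIterable below guards this invariant). The payoff is that callers can process categories and scores generically. A hedged sketch, where result stands for a ModerationsResult obtained elsewhere:

    // Find the highest-scoring category via CategoryScores' (String, Double) iterator.
    if let moderation = result.results.first {
        let worst = moderation.categoryScores.max { $0.1 < $1.1 }
        print("highest-scoring category:", worst?.0 ?? "n/a", worst?.1 ?? 0)
    }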
1 change: 1 addition & 0 deletions Sources/OpenAI/Public/Protocols/OpenAIProtocol.swift
@@ -211,6 +211,7 @@ public protocol OpenAIProtocol {
- query: A `ModerationsQuery` object containing the input parameters for the API request. This includes the input text and optionally the model to be used.
- completion: A closure which receives the result when the API request finishes. The closure's parameter, `Result<ModerationsResult, Error>`, will contain either the `ModerationsResult` object with the list of category results, or an error if the request failed.
**/
+ @available(iOS 13.0, *)
func moderations(query: ModerationsQuery, completion: @escaping (Result<ModerationsResult, Error>) -> Void)

/**
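For context, a minimal call-site sketch for the completion-based method documented above; the client configuration and token are placeholders, not part of this commit:

    let openAI = OpenAI(apiToken: "YOUR_TOKEN")
    openAI.moderations(query: ModerationsQuery(input: "Hello, world!")) { result in
        switch result {
        case .success(let response):
            // flagged == true means the model classified the input as violating policy
            print(response.results.first?.flagged == true ? "flagged" : "clean")
        case .failure(let error):
            print("moderation request failed:", error)
        }
    }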
20 changes: 17 additions & 3 deletions Tests/OpenAITests/OpenAITests.swift
@@ -245,16 +245,30 @@ class OpenAITests: XCTestCase {
func testModerations() async throws {
let query = ModerationsQuery(input: "Hello, world!")
let moderationsResult = ModerationsResult(id: "foo", model: .moderation, results: [
- .init(categories: .init(hate: false, hateThreatening: false, selfHarm: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false),
- categoryScores: .init(hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1),
+ .init(categories: .init(harassment: false, harassmentThreatening: false, hate: false, hateThreatening: false, selfHarm: false, selfHarmIntent: false, selfHarmInstructions: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false),
+ categoryScores: .init(harassment: 0.1, harassmentThreatening: 0.1, hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, selfHarmIntent: 0.1, selfHarmInstructions: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1),
flagged: false)
])
try self.stub(result: moderationsResult)

let result = try await openAI.moderations(query: query)
XCTAssertEqual(result, moderationsResult)
}


+ func testModerationsIterable() {
+ let categories = ModerationsResult.Moderation.Categories(harassment: false, harassmentThreatening: false, hate: false, hateThreatening: false, selfHarm: false, selfHarmIntent: false, selfHarmInstructions: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false)
+ Mirror(reflecting: categories).children.enumerated().forEach { index, element in
+ let label = ModerationsResult.Moderation.Categories.CodingKeys.allCases[index].stringValue.replacing(try! Regex("[/-]"), with: { _ in "" })
+ XCTAssertEqual(label, element.label!.lowercased())
+ }

+ let categoryScores = ModerationsResult.Moderation.CategoryScores(harassment: 0.1, harassmentThreatening: 0.1, hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, selfHarmIntent: 0.1, selfHarmInstructions: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1)
+ Mirror(reflecting: categoryScores).children.enumerated().forEach { index, element in
+ let label = ModerationsResult.Moderation.CategoryScores.CodingKeys.allCases[index].stringValue.replacing(try! Regex("[/-]"), with: { _ in "" })
+ XCTAssertEqual(label, element.label!.lowercased())
+ }
+ }

func testModerationsError() async throws {
let query = ModerationsQuery(input: "Hello, world!")
let inError = APIError(message: "foo", type: "bar", param: "baz", code: "100")
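The label comparison in testModerationsIterable works because stripping "/" and "-" from a raw coding-key value yields exactly the lowercased Swift property name:

    // "self-harm/intent" -> "selfharmintent" == "selfHarmIntent".lowercased()

so the test fails if a property and its coding key ever drift out of sync.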
4 changes: 2 additions & 2 deletions Tests/OpenAITests/OpenAITestsCombine.swift
@@ -94,8 +94,8 @@ final class OpenAITestsCombine: XCTestCase {
func testModerations() throws {
let query = ModerationsQuery(input: "Hello, world!")
let moderationsResult = ModerationsResult(id: "foo", model: .moderation, results: [
- .init(categories: .init(hate: false, hateThreatening: false, selfHarm: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false),
- categoryScores: .init(hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1),
+ .init(categories: .init(harassment: false, harassmentThreatening: false, hate: false, hateThreatening: false, selfHarm: false, selfHarmIntent: false, selfHarmInstructions: false, sexual: false, sexualMinors: false, violence: false, violenceGraphic: false),
+ categoryScores: .init(harassment: 0.1, harassmentThreatening: 0.1, hate: 0.1, hateThreatening: 0.1, selfHarm: 0.1, selfHarmIntent: 0.1, selfHarmInstructions: 0.1, sexual: 0.1, sexualMinors: 0.1, violence: 0.1, violenceGraphic: 0.1),
flagged: false)
])
try self.stub(result: moderationsResult)
12 changes: 10 additions & 2 deletions Tests/OpenAITests/OpenAITestsDecoder.swift
@@ -364,18 +364,26 @@ class OpenAITestsDecoder: XCTestCase {
"results": [
{
"categories": {
"harassment": false,
"harassment/threatening": false,
"hate": false,
"hate/threatening": true,
"self-harm": false,
"self-harm/intent": false,
"self-harm/instructions": false,
"sexual": false,
"sexual/minors": false,
"violence": true,
"violence/graphic": false
},
"category_scores": {
"harassment": 0.0431830403405153,
"harassment/threatening": 0.1229622494034651,
"hate": 0.22714105248451233,
"hate/threatening": 0.4132447838783264,
"self-harm": 0.00523239187896251,
"self-harm/intent": 0.307237106114835,
"self-harm/instructions": 0.42189350703096,
"sexual": 0.01407341007143259,
"sexual/minors": 0.0038522258400917053,
"violence": 0.9223177433013916,
@@ -388,8 +396,8 @@ class OpenAITestsDecoder: XCTestCase {
"""

let expectedValue = ModerationsResult(id: "modr-5MWoLO", model: .moderation, results: [
- .init(categories: .init(hate: false, hateThreatening: true, selfHarm: false, sexual: false, sexualMinors: false, violence: true, violenceGraphic: false),
- categoryScores: .init(hate: 0.22714105248451233, hateThreatening: 0.4132447838783264, selfHarm: 0.00523239187896251, sexual: 0.01407341007143259, sexualMinors: 0.0038522258400917053, violence: 0.9223177433013916, violenceGraphic: 0.036865197122097015),
+ .init(categories: .init(harassment: false, harassmentThreatening: false, hate: false, hateThreatening: true, selfHarm: false, selfHarmIntent: false, selfHarmInstructions: false, sexual: false, sexualMinors: false, violence: true, violenceGraphic: false),
+ categoryScores: .init(harassment: 0.0431830403405153, harassmentThreatening: 0.1229622494034651, hate: 0.22714105248451233, hateThreatening: 0.4132447838783264, selfHarm: 0.00523239187896251, selfHarmIntent: 0.307237106114835, selfHarmInstructions: 0.42189350703096, sexual: 0.01407341007143259, sexualMinors: 0.0038522258400917053, violence: 0.9223177433013916, violenceGraphic: 0.036865197122097015),
flagged: true)
])
try decode(data, expectedValue)
