From b245ecc9177a7389a8f11f886b6d8ceaaa898758 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Minh=20Nguye=CC=82=CC=83n?= Date: Wed, 30 Oct 2019 03:41:24 -0700 Subject: [PATCH] Removed Objective-C compatibility --- Sources/MapboxSpeech/MBSpeechOptions.swift | 103 ++----------- Sources/MapboxSpeech/MapboxSpeech.swift | 137 +++++++++++------- .../MapboxSpeechTests/MapboxVoiceTests.swift | 2 +- readme.md | 51 +------ 4 files changed, 101 insertions(+), 192 deletions(-) diff --git a/Sources/MapboxSpeech/MBSpeechOptions.swift b/Sources/MapboxSpeech/MBSpeechOptions.swift index 65caa82..b59a049 100644 --- a/Sources/MapboxSpeech/MBSpeechOptions.swift +++ b/Sources/MapboxSpeech/MBSpeechOptions.swift @@ -1,104 +1,29 @@ import Foundation -@objc(MBTextType) -public enum TextType: UInt, CustomStringConvertible, Codable { - +public enum TextType: String, Codable { case text - case ssml - - public init?(description: String) { - let type: TextType - switch description { - case "text": - type = .text - case "ssml": - type = .ssml - default: - return nil - } - self.init(rawValue: type.rawValue) - } - - public var description: String { - switch self { - case .text: - return "text" - case .ssml: - return "ssml" - } - } } -@objc(MBAudioFormat) -public enum AudioFormat: UInt, CustomStringConvertible, Codable { - +public enum AudioFormat: String, Codable { case mp3 - - public init?(description: String) { - let format: AudioFormat - switch description { - case "mp3": - format = .mp3 - default: - return nil - } - self.init(rawValue: format.rawValue) - } - - public var description: String { - switch self { - case .mp3: - return "mp3" - } - } } -@objc(MBSpeechGender) -public enum SpeechGender: UInt, CustomStringConvertible, Codable { - +public enum SpeechGender: String, Codable { case female - case male - case neuter - - public init?(description: String) { - let gender: SpeechGender - switch description { - case "female": - gender = .female - case "male": - gender = .male - default: - gender = 
.neuter - } - self.init(rawValue: gender.rawValue) - } - - public var description: String { - switch self { - case .female: - return "female" - case .male: - return "male" - case .neuter: - return "neuter" - } - } } -@objc(MBSpeechOptions) -open class SpeechOptions: NSObject, Codable { - - @objc public init(text: String) { +open class SpeechOptions: Codable { + public init(text: String) { self.text = text - self.textType = .text + textType = .text } - @objc public init(ssml: String) { + public init(ssml: String) { self.text = ssml - self.textType = .ssml + textType = .ssml } /** @@ -106,35 +31,33 @@ open class SpeechOptions: NSObject, Codable { If `SSML` is provided, `TextType` must be `TextType.ssml`. */ - @objc open var text: String - + open var text: String /** Type of text to synthesize. `SSML` text must be valid `SSML` for request to work. */ - @objc let textType: TextType - + let textType: TextType /** Audio format for outputted audio file. */ - @objc open var outputFormat: AudioFormat = .mp3 + open var outputFormat: AudioFormat = .mp3 /** The locale in which the audio is spoken. By default, the user's system locale will be used to decide upon an appropriate voice. */ - @objc open var locale: Locale = Locale.autoupdatingCurrent + open var locale: Locale = .autoupdatingCurrent /** Gender of voice speaking text. Note: not all languages have both genders. */ - @objc open var speechGender: SpeechGender = .neuter + open var speechGender: SpeechGender = .neuter /** The path of the request URL, not including the hostname or any parameters. diff --git a/Sources/MapboxSpeech/MapboxSpeech.swift b/Sources/MapboxSpeech/MapboxSpeech.swift index 51ac046..098d604 100644 --- a/Sources/MapboxSpeech/MapboxSpeech.swift +++ b/Sources/MapboxSpeech/MapboxSpeech.swift @@ -2,9 +2,6 @@ import Foundation typealias JSONDictionary = [String: Any] -/// Indicates that an error occurred in MapboxSpeech. 
-public let MBSpeechErrorDomain = "MBSpeechErrorDomain" - /// The Mapbox access token specified in the main application bundle’s Info.plist. let defaultAccessToken = Bundle.main.object(forInfoDictionaryKey: "MGLMapboxAccessToken") as? String @@ -64,10 +61,8 @@ var skuToken: String? { Use `AVAudioPlayer` to play the audio that a speech synthesizer object produces. */ -@objc(MBSpeechSynthesizer) -open class SpeechSynthesizer: NSObject { - - public typealias CompletionHandler = (_ data: Data?, _ error: NSError?) -> Void +open class SpeechSynthesizer { + public typealias CompletionHandler = (_ data: Data?, _ error: SpeechError?) -> Void // MARK: Creating a Speech Object @@ -76,14 +71,13 @@ open class SpeechSynthesizer: NSObject { To use this object, specify a Mapbox [access token](https://www.mapbox.com/help/define-access-token/) in the `MGLMapboxAccessToken` key in the main application bundle’s Info.plist. */ - @objc(sharedSpeechSynthesizer) public static let shared = SpeechSynthesizer(accessToken: nil) /// The API endpoint to request the audio from. - @objc public private(set) var apiEndpoint: URL + public private(set) var apiEndpoint: URL /// The Mapbox access token to associate the request with. - @objc public let accessToken: String + public let accessToken: String /** Initializes a newly created speech synthesizer object with an optional access token and host. @@ -91,7 +85,7 @@ open class SpeechSynthesizer: NSObject { - parameter accessToken: A Mapbox [access token](https://www.mapbox.com/help/define-access-token/). If an access token is not specified when initializing the speech synthesizer object, it should be specified in the `MGLMapboxAccessToken` key in the main application bundle’s Info.plist. - parameter host: An optional hostname to the server API. The Mapbox Voice API endpoint is used by default. */ - @objc public init(accessToken: String?, host: String?) { + public init(accessToken: String?, host: String?) { let accessToken = accessToken ?? 
defaultAccessToken assert(accessToken != nil && !accessToken!.isEmpty, "A Mapbox access token is required. Go to . In Info.plist, set the MGLMapboxAccessToken key to your access token, or use the Speech(accessToken:host:) initializer.") @@ -108,7 +102,7 @@ open class SpeechSynthesizer: NSObject { - parameter accessToken: A Mapbox [access token](https://www.mapbox.com/help/define-access-token/). If an access token is not specified when initializing the speech synthesizer object, it should be specified in the `MGLMapboxAccessToken` key in the main application bundle’s Info.plist. */ - @objc public convenience init(accessToken: String?) { + public convenience init(accessToken: String?) { self.init(accessToken: accessToken, host: nil) } @@ -123,7 +117,6 @@ open class SpeechSynthesizer: NSObject { - parameter completionHandler: The closure (block) to call with the resulting audio. This closure is executed on the application’s main thread. - returns: The data task used to perform the HTTP request. If, while waiting for the completion handler to execute, you no longer want the resulting audio, cancel this task. */ - @objc(audioDataWithOptions:completionHandler:) @discardableResult open func audioData(with options: SpeechOptions, completionHandler: @escaping CompletionHandler) -> URLSessionDataTask { let url = self.url(forSynthesizing: options) let task = dataTask(with: url, completionHandler: { (data) in @@ -144,34 +137,46 @@ open class SpeechSynthesizer: NSObject { - returns: The data task for the URL. - postcondition: The caller must resume the returned task. 
*/ - fileprivate func dataTask(with url: URL, completionHandler: @escaping (_ data: Data) -> Void, errorHandler: @escaping (_ error: NSError) -> Void) -> URLSessionDataTask { + fileprivate func dataTask(with url: URL, completionHandler: @escaping (_ data: Data) -> Void, errorHandler: @escaping (_ error: SpeechError) -> Void) -> URLSessionDataTask { var request = URLRequest(url: url) request.setValue(userAgent, forHTTPHeaderField: "User-Agent") - let task = URLSession.shared.dataTask(with: request as URLRequest) { (data, response, error) in + let task = URLSession.shared.dataTask(with: request as URLRequest) { (possibleData, possibleResponse, possibleError) in + guard let response = possibleResponse else { + errorHandler(.invalidResponse) + return + } + + guard let data = possibleData else { + errorHandler(.noData) + return + } + + if let error = possibleError { + errorHandler(.unknown(response: possibleResponse, underlying: error, code: nil, message: nil)) + return + } // Parse error object - var errorJSON: JSONDictionary = [:] - if let data = data, response?.mimeType == "application/json" { + if response.mimeType == "application/json" { + var errorJSON: JSONDictionary = [:] do { errorJSON = try JSONSerialization.jsonObject(with: data, options: []) as! JSONDictionary } catch { - assert(false, "Invalid data") + errorHandler(SpeechSynthesizer.informativeError(code: nil, message: nil, response: response, underlyingError: error)) } - } - - let apiStatusCode = errorJSON["code"] as? String - let apiMessage = errorJSON["message"] as? String - guard data != nil && error == nil && ((apiStatusCode == nil && apiMessage == nil) || apiStatusCode == "Ok") else { - let apiError = SpeechSynthesizer.informativeError(describing: errorJSON, response: response, underlyingError: error as NSError?) - DispatchQueue.main.async { - errorHandler(apiError) + + let apiStatusCode = errorJSON["code"] as? String + let apiMessage = errorJSON["message"] as? 
String + guard (apiStatusCode == nil && apiMessage == nil) || apiStatusCode == "Ok" else { + let apiError = SpeechSynthesizer.informativeError(code: apiStatusCode, message: apiMessage, response: response, underlyingError: possibleError) + DispatchQueue.main.async { + errorHandler(apiError) + } + return } - return } - guard let data = data else { return } - DispatchQueue.main.async { completionHandler(data) } @@ -183,7 +188,6 @@ open class SpeechSynthesizer: NSObject { /** The HTTP URL used to fetch audio from the API. */ - @objc(URLForSynthesizingSpeechWithOptions:) open func url(forSynthesizing options: SpeechOptions) -> URL { var params = options.params @@ -202,35 +206,62 @@ open class SpeechSynthesizer: NSObject { /** Returns an error that supplements the given underlying error with additional information from the an HTTP response’s body or headers. */ - static func informativeError(describing json: JSONDictionary, response: URLResponse?, underlyingError error: NSError?) -> NSError { - let apiStatusCode = json["code"] as? String - var userInfo = error?.userInfo ?? [:] + static func informativeError(code: String?, message: String?, response: URLResponse?, underlyingError error: Error?) -> SpeechError { if let response = response as? HTTPURLResponse { - var failureReason: String? = nil - var recoverySuggestion: String? = nil - switch (response.statusCode, apiStatusCode ?? "") { + switch (response.statusCode, code ?? "") { case (429, _): - if let timeInterval = response.rateLimitInterval, let maximumCountOfRequests = response.rateLimit { - let intervalFormatter = DateComponentsFormatter() - intervalFormatter.unitsStyle = .full - let formattedInterval = intervalFormatter.string(from: timeInterval) ?? 
"\(timeInterval) seconds" - let formattedCount = NumberFormatter.localizedString(from: NSNumber(value: maximumCountOfRequests), number: .decimal) - failureReason = "More than \(formattedCount) requests have been made with this access token within a period of \(formattedInterval)." - } - if let rolloverTime = response.rateLimitResetTime { - let formattedDate = DateFormatter.localizedString(from: rolloverTime, dateStyle: .long, timeStyle: .long) - recoverySuggestion = "Wait until \(formattedDate) before retrying." - } + return .rateLimited(rateLimitInterval: response.rateLimitInterval, rateLimit: response.rateLimit, resetTime: response.rateLimitResetTime) default: - failureReason = json["message"] as? String + return .unknown(response: response, underlying: error, code: code, message: message) } - userInfo[NSLocalizedFailureReasonErrorKey] = failureReason ?? userInfo[NSLocalizedFailureReasonErrorKey] ?? HTTPURLResponse.localizedString(forStatusCode: error?.code ?? -1) - userInfo[NSLocalizedRecoverySuggestionErrorKey] = recoverySuggestion ?? userInfo[NSLocalizedRecoverySuggestionErrorKey] } - if let error = error { - userInfo[NSUnderlyingErrorKey] = error + return .unknown(response: response, underlying: error, code: code, message: message) + } +} + +public enum SpeechError: LocalizedError { + case noData + case invalidResponse + case rateLimited(rateLimitInterval: TimeInterval?, rateLimit: UInt?, resetTime: Date?) + case unknown(response: URLResponse?, underlying: Error?, code: String?, message: String?) + + public var failureReason: String? { + switch self { + case .noData: + return "The server returned an empty response." + case .invalidResponse: + return "The server returned a response that isn’t correctly formatted." 
+ case let .rateLimited(rateLimitInterval: interval, rateLimit: limit, _): + let intervalFormatter = DateComponentsFormatter() + intervalFormatter.unitsStyle = .full + guard let interval = interval, let limit = limit else { + return "Too many requests." + } + let formattedInterval = intervalFormatter.string(from: interval) ?? "\(interval) seconds" + let formattedCount = NumberFormatter.localizedString(from: NSNumber(value: limit), number: .decimal) + return "More than \(formattedCount) requests have been made with this access token within a period of \(formattedInterval)." + case let .unknown(_, underlying: error, _, message): + return message + ?? (error as NSError?)?.userInfo[NSLocalizedFailureReasonErrorKey] as? String + ?? HTTPURLResponse.localizedString(forStatusCode: (error as NSError?)?.code ?? -1) + } + } + + public var recoverySuggestion: String? { + switch self { + case .noData: + return nil + case .invalidResponse: + return nil + case let .rateLimited(rateLimitInterval: _, rateLimit: _, resetTime: rolloverTime): + guard let rolloverTime = rolloverTime else { + return nil + } + let formattedDate: String = DateFormatter.localizedString(from: rolloverTime, dateStyle: .long, timeStyle: .long) + return "Wait until \(formattedDate) before retrying." + case let .unknown(_, underlying: error, _, _): + return (error as NSError?)?.userInfo[NSLocalizedRecoverySuggestionErrorKey] as? String } - return NSError(domain: error?.domain ?? MBSpeechErrorDomain, code: error?.code ?? -1, userInfo: userInfo) } } diff --git a/Tests/MapboxSpeechTests/MapboxVoiceTests.swift b/Tests/MapboxSpeechTests/MapboxVoiceTests.swift index d7c1a9c..59932e7 100644 --- a/Tests/MapboxSpeechTests/MapboxVoiceTests.swift +++ b/Tests/MapboxSpeechTests/MapboxVoiceTests.swift @@ -41,7 +41,7 @@ class MapboxVoiceTests: XCTestCase { options.locale = Locale(identifier: "en_US") var audio: Data? - let task = voice.audioData(with: options) { (data: Data?, error: NSError?) 
in + let task = voice.audioData(with: options) { (data: Data?, error: SpeechError?) in XCTAssertNil(error) XCTAssertNotNil(data) audio = data! diff --git a/readme.md b/readme.md index 1be1e0e..2281cb1 100644 --- a/readme.md +++ b/readme.md @@ -2,6 +2,8 @@ Mapbox Speech connects your iOS, macOS, tvOS, or watchOS application to the Mapbox Voice API. Take turn instructions from the [Mapbox Directions API](https://www.mapbox.com/api-documentation/#directions) and read them aloud naturally in multiple languages. This library is specifically designed to work with [MapboxDirections.swift](https://github.com/mapbox/MapboxDirections.swift/) as part of the [Mapbox Navigation SDK for iOS](https://github.com/mapbox/mapbox-navigation-ios/). +This library is compatible with applications written in Swift. Version 2.0 was the last version of this library to support applications written in Objective-C or AppleScript. + ## Getting started Specify the following dependency in your [Carthage](https://github.com/Carthage/Carthage) Cartfile: @@ -28,26 +30,16 @@ Then `import MapboxSpeech` or `@import MapboxSpeech;`. You’ll need a [Mapbox access token](https://www.mapbox.com/developers/api/#access-tokens) in order to use the API. If you’re already using the [Mapbox Maps SDK for iOS](https://www.mapbox.com/ios-sdk/) or [macOS SDK](https://mapbox.github.io/mapbox-gl-native/macos/), Mapbox Speech automatically recognizes your access token, as long as you’ve placed it in the `MGLMapboxAccessToken` key of your application’s Info.plist file. -The examples below are each provided in Swift (denoted with `main.swift`) and Objective-C (`main.m`). - ### Basics -The main speech synthesis class is SpeechSynthesizer (in Swift) or MBSpeechSynthesizer (in Objective-C). Create a speech synthesizer object using your access token: +The main speech synthesis class is `SpeechSynthesizer`. 
Create a speech synthesizer object using your access token: ```swift -// main.swift import MapboxSpeech let speechSynthesizer = SpeechSynthesizer(accessToken: "<#your access token#>") ``` -```objc -// main.m -@import MapboxSpeech; - -MBSpeechSynthesizer *speechSynthesizer = [[MBSpeechSynthesizer alloc] initWithAccessToken:@"<#your access token#>"]; - ``` - Alternatively, you can place your access token in the `MGLMapboxAccessToken` key of your application’s Info.plist file, then use the shared speech synthesizer object: ```swift @@ -55,16 +47,6 @@ Alternatively, you can place your access token in the `MGLMapboxAccessToken` key let speechSynthesizer = SpeechSynthesizer.shared ``` -```objc -// main.m -MBSpeechSynthesizer *speechSynthesizer = [MBSpeechSynthesizer sharedSpeechSynthesizer]; -``` - -```applescript -// AppDelegate.applescript -set theSpeechSynthesizer to sharedSpeechSynthesizer of MBSpeechSynthesizer of the current application -``` - With the speech synthesizer object in hand, construct a `SpeechOptions` object and pass it into the `SpeechSynthesizer.audioData(with:completionHandler:)` method. ```swift @@ -80,30 +62,3 @@ speechSynthesizer.audioData(with: options) { (data: Data?, error: NSError?) 
-}]; -``` - -```applescript --- AppDelegate.applescript - -set theOptions to alloc of MBSpeechOptions of the current application -tell theOptions to initWithText:"hello, my name is Bobby" - -set theURL to theSpeechSynthesizer's URLForSynthesizingSpeechWithOptions:theOptions -set theData to the current application's NSData's dataWithContentsOfURL:theURL - --- Do something with the audio! -```