Upgrade to Scala 2.13 (#70)
juanitodread authored Sep 15, 2021
1 parent 883f641 commit 8dec230
Showing 10 changed files with 36 additions and 44 deletions.
21 changes: 11 additions & 10 deletions build.sbt
@@ -1,18 +1,18 @@
name := "pitaya-finch"
version := "0.1.0"
version := "1.2.1"

lazy val root = (project in file("."))

scalaVersion := "2.12.8"
scalaVersion := "2.13.6"

val finchVersion = "0.26.0"
val circeVersion = "0.11.0"
val configVersion = "1.3.3"
val scalatestVersion = "3.0.5"
val twitterServerVersion = "19.2.0"
val logbackVersion = "1.2.3"
val finchVersion = "0.32.1"
val circeVersion = "0.13.0"
val configVersion = "1.4.1"
val scalatestVersion = "3.2.9"
val twitterServerVersion = "20.9.0"
val logbackVersion = "1.2.6"

val openNlpVersion = "1.9.1"
val openNlpVersion = "1.9.3"

libraryDependencies ++= Seq(
"com.github.finagle" %% "finchx-core" % finchVersion,
@@ -24,6 +24,7 @@ libraryDependencies ++= Seq(
"com.twitter" %% "finagle-stats" % twitterServerVersion,
"com.twitter" %% "twitter-server-logback-classic" % twitterServerVersion,
"ch.qos.logback" % "logback-classic" % logbackVersion,
"org.scalactic" %% "scalactic" % scalatestVersion,
"org.scalatest" %% "scalatest" % scalatestVersion % "test",

"org.apache.opennlp" % "opennlp-tools" % openNlpVersion
@@ -40,4 +41,4 @@ enablePlugins(AshScriptPlugin)

dockerBaseImage := "openjdk:jre-alpine"

-dockerExposedPorts ++= Seq(8080, 9990)
+dockerExposedPorts ++= Seq(8080, 9990)
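A note on why every library version moves in the same commit as scalaVersion: with the %% operator, sbt appends the Scala binary version to the artifact name, so each of these dependencies must publish a 2.13 build before the upgrade can resolve. A minimal sketch (the artifact is the one from this build file; the expansion shown is standard sbt behaviour):

// In build.sbt, %% resolves against the current scalaVersion, so
"com.github.finagle" %% "finchx-core" % finchVersion
// after scalaVersion := "2.13.6" is equivalent to the explicit
"com.github.finagle" % "finchx-core_2.13" % finchVersion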
2 changes: 1 addition & 1 deletion project/build.properties
@@ -1 +1 @@
-sbt.version=1.2.6
+sbt.version=1.3.13
2 changes: 1 addition & 1 deletion project/plugins.sbt
@@ -2,4 +2,4 @@
addSbtPlugin("org.scalariform" % "sbt-scalariform" % "1.8.2")

// Scala Native
addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.4.0")
addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.4.0")
@@ -17,7 +17,7 @@ class Chunker(model: ChunkerModel) {
}

object Chunker {
-private final val chunker = new Chunker(Await.result(ChunkerModelAsync(), 5 seconds))
+private final val chunker = new Chunker(Await.result(ChunkerModelAsync(), 5.seconds))

def apply(tokens: List[String], tags: List[String]): List[Chunk] = {
chunker.chunk(tokens, tags)
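The `5 seconds` to `5.seconds` change, repeated in the model loaders below, removes a postfix operator: bare `5 seconds` needs the scala.language.postfixOps feature import under Scala 2.13, while `5.seconds` goes through the DurationInt implicit directly. A small standalone sketch of the new form (hypothetical snippet, not from this repository):

import scala.concurrent.duration._

object DurationSyntax extends App {
  // Old style: `5 seconds` is a postfix call and requires
  // `import scala.language.postfixOps` to compile cleanly in 2.13.
  // New style: a plain method call, no feature import required.
  val timeout: FiniteDuration = 5.seconds
  println(timeout) // prints: 5 seconds
}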
@@ -32,7 +32,7 @@ object EntityRecognizer extends NumberFormatter {
OrganizationEntityModelAsync(),
PercentageEntityModelAsync(),
PersonEntityModelAsync(),
-TimeEntityModelAsync()).map(futureModel => Await.result(futureModel, 5 seconds))
+TimeEntityModelAsync()).map(futureModel => Await.result(futureModel, 5.seconds))
.map(model => new EntityRecognizer(model))

def apply(sentence: String): List[Entity] = {
@@ -30,7 +30,7 @@ class Tagger[T <: PosModel](model: T) extends Tags {
}

object Tagger {
-private final val tagger = new Tagger(Await.result(PerceptronModelAsync(), 5 seconds))
+private final val tagger = new Tagger(Await.result(PerceptronModelAsync(), 5.seconds))

def apply(sentence: String, withChunk: Boolean = false): TagsResult = {
tagger.tag(sentence, withChunk)
@@ -17,9 +17,9 @@ class EnglishLemmatizer(dictionaryPath: String) extends Tags {
val lemmas: List[Lemma] = tagsMap.keys.map { tag =>
(
englishLemmas.lemmatize(
-List(token).toArray,
-List(tag).toArray).mkString(""),
-Lemma(tag, tagsMap(tag)))
+List(token).toArray,
+List(tag).toArray).mkString(""),
+Lemma(tag, tagsMap(tag)))
}.filter(result => result._1 != "O")
.map(_._2).toList
LemmaResult(token, lemmas)
6 changes: 4 additions & 2 deletions src/test/scala/org/juanitodread/pitayafinch/UnitSpec.scala
@@ -1,9 +1,11 @@
package org.juanitodread.pitayafinch

import org.scalatest._
+import flatspec._
+import matchers._

-abstract class UnitSpec extends FlatSpec
-with Matchers
+abstract class UnitSpec extends AnyFlatSpec
+with should.Matchers
with OptionValues
with Inside
with Inspectors
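For reference, the FlatSpec/Matchers rename comes from the ScalaTest 3.1+ package reorganization: the style traits now live in dedicated subpackages (org.scalatest.flatspec.AnyFlatSpec, org.scalatest.matchers.should.Matchers), which the chained wildcard imports above bring into scope. A minimal standalone spec in the new style (hypothetical example, not part of this repository):

import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

// Same base style as UnitSpec, with the ScalaTest 3.2 imports spelled out.
class ExampleSpec extends AnyFlatSpec with Matchers {
  "a List" should "know its size" in {
    List(1, 2, 3) should have size 3
  }
}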
@@ -15,14 +15,16 @@ class EnglishLemmatizerSpec extends UnitSpec {
it should "get the lemma result of a word" in {
val lemmatizer: EnglishLemmatizer = new EnglishLemmatizer(dictionary)
val token: String = "better"
-assert(lemmatizer.lemmatize(token) === LemmaResult(
-"better",
-List(
-Lemma("JJR", "Adjective, comparative"),
-Lemma("NN", "Noun, singular or mass"),
-Lemma("VB", "Verb, base form"),
-Lemma("VBP", "Verb, non-3rd person singular present"),
-Lemma("RBR", "Adverb, comparative"))))
+
+val lemmas = lemmatizer.lemmatize(token)
+
+assert(lemmas.original === "better")
+lemmas.lemmas should contain theSameElementsAs List(
+Lemma("JJR", "Adjective, comparative"),
+Lemma("NN", "Noun, singular or mass"),
+Lemma("VB", "Verb, base form"),
+Lemma("VBP", "Verb, non-3rd person singular present"),
+Lemma("RBR", "Adverb, comparative"))
}

it should "get the lemma result of a not common word" in {
@@ -44,21 +46,8 @@ class EnglishLemmatizerSpec extends UnitSpec {
it should "get the lemma result of a list of words" in {
val lemmatizer: EnglishLemmatizer = new EnglishLemmatizer(dictionary)
val tokens: List[String] = List("better", "meeting")
-assert(
-lemmatizer.lemmatize(tokens) === List(
-LemmaResult(
-"better",
-List(
-Lemma("JJR", "Adjective, comparative"),
-Lemma("NN", "Noun, singular or mass"),
-Lemma("VB", "Verb, base form"),
-Lemma("VBP", "Verb, non-3rd person singular present"),
-Lemma("RBR", "Adverb, comparative"))),
-LemmaResult(
-"meeting",
-List(
-Lemma("VBG", "Verb, gerund, or present participle"),
-Lemma("NNN", "Noun")))))
+
+lemmatizer.lemmatize(tokens) should have length 2
}

it should "get the lemma result of an empty list of words" in {
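The rewritten assertions above replace exact equality on a LemmaResult with a check on the original token plus an order-insensitive comparison of the lemmas, so the expectation no longer depends on the order in which the tags come back. A small sketch of the matcher involved (hypothetical spec, not from this repository):

import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class SameElementsSpec extends AnyFlatSpec with Matchers {
  "contain theSameElementsAs" should "ignore element order" in {
    List("JJR", "NN", "VB") should contain theSameElementsAs List("VB", "JJR", "NN")
  }
}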
@@ -45,7 +45,7 @@ class NormalizerEndpointSpec extends UnitSpec {
List("hello", "bye"),
List(
LemmaResult("hello", List(Lemma("NN", "Noun, singular or mass"))),
LemmaResult("bye", List(Lemma("JJ", "Adjective"), Lemma("NN", "Noun, singular or mass"))))))
LemmaResult("bye", List(Lemma("NN", "Noun, singular or mass"), Lemma("JJ", "Adjective"))))))
}
}
}
