Commit cef96d5

Merge pull request #5014 from lchen-2101/chore/cve_update_250424
chore: cve updates
2 parents 35a8ace + 7dbc1eb commit cef96d5

12 files changed: +81 additions, -141 deletions

.github/workflows/helper-functions.yaml

Lines changed: 8 additions & 1 deletion
```diff
@@ -8,6 +8,13 @@ on:
 jobs:
   test-service:
     runs-on: ubuntu-latest
+    services:
+      cassandra:
+        image: "cassandra:3.11.4"
+        ports:
+          - "9042:9042"
+    env:
+      HMDA_RUNTIME_MODE: "docker-compose"
 
     steps:
       - name: Checkout
@@ -19,7 +26,7 @@ jobs:
       - name: Setup JDK
         uses: actions/setup-java@v4
         with:
-          java-version: '11'
+          java-version: '17'
           distribution: 'adopt'
 
       - name: Run hmda-platform Tests
```
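The added `HMDA_RUNTIME_MODE: "docker-compose"` environment variable and the JDK 17 bump pair with the application-side changes below, where the runtime mode decides whether Kubernetes-specific wiring runs. A minimal sketch of reading such a mode, assuming Typesafe Config on the classpath; the env-var-then-config-then-default precedence shown here is illustrative, not the platform's actual lookup:

```scala
import com.typesafe.config.{Config, ConfigFactory}

object RuntimeModeSketch extends App {
  // Loads application.conf / reference.conf from the classpath.
  val config: Config = ConfigFactory.load()

  // Illustrative precedence: HMDA_RUNTIME_MODE env var, then the
  // hmda.runtime.mode config path seen in RealTimeConfig, then "dev".
  val runtimeMode: String =
    sys.env
      .get("HMDA_RUNTIME_MODE")
      .orElse(
        if (config.hasPath("hmda.runtime.mode")) Some(config.getString("hmda.runtime.mode")) else None
      )
      .getOrElse("dev")

  println(s"runtime mode: $runtimeMode")
}
```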

README.md

Lines changed: 14 additions & 3 deletions
````diff
@@ -174,15 +174,26 @@ export JAVA_HOME=$HOME/.asdf/installs/java/openjdk-13.0.2
 
 ### Running with sbt
 
-The HMDA Platform can run locally using [`sbt`](https://www.scala-sbt.org/) with an [embedded Cassandra](https://doc.akka.io/docs/alpakka-kafka/current/) and [embedded Kafka](https://doc.akka.io/docs/alpakka-kafka/current/). To get started:
+~~The HMDA Platform can run locally using [`sbt`](https://www.scala-sbt.org/) with an [embedded Cassandra](https://doc.akka.io/docs/alpakka-kafka/current/) and [embedded Kafka](https://doc.akka.io/docs/alpakka-kafka/current/). To get started:~~
+_removing embedded cassandra allows us to use more up-to-date java versions_
 
+For local development, supporting services need to be started first in docker:
+
+1. Open terminal with `hmda-platform` root as the working directory
+2. Start the supporting services of Kafka, Cassandra, and PostgreSQL:
+   ```bash
+   docker compose up -d kafka cassandra pg
+   ```
+   * the `-d` option runs the services in detached mode, so we can use the same terminal to run the remaining commands; to stop the services run:
+   ```bash
+   docker compose stop
+   ```
 1. Export the following environment variables:
    ```bash
    export CASSANDRA_CLUSTER_HOSTS=localhost
    export APP_PORT=2551
    ```
-2. Open terminal with `hmda-platform` root as the working directory
-3. Start sbt and run the platform with the following commands:
+1. Start sbt and run the platform with the following commands:
    ```bash
    sbt
    [...]
````
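Before starting sbt, it can be useful to confirm the docker-compose Cassandra is actually accepting CQL connections. This is a standalone sketch, not part of the repo: it assumes the DataStax Java driver 4.x on the classpath and the default `datacenter1` data center name of the `cassandra:3.11.4` image.

```scala
import java.net.InetSocketAddress

import com.datastax.oss.driver.api.core.CqlSession

object CassandraSmokeCheck extends App {
  // Same env var the README exports for the platform itself.
  val host = sys.env.getOrElse("CASSANDRA_CLUSTER_HOSTS", "localhost")

  val session = CqlSession
    .builder()
    .addContactPoint(new InetSocketAddress(host, 9042))
    .withLocalDatacenter("datacenter1") // default DC name for a stock Cassandra container (assumption)
    .build()

  // system.local always exists; release_version tells us which Cassandra answered.
  val version = session
    .execute("SELECT release_version FROM system.local")
    .one()
    .getString("release_version")

  println(s"Cassandra reachable at $host:9042, release $version")
  session.close()
}
```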

build.sbt

Lines changed: 2 additions & 4 deletions
```diff
@@ -3,7 +3,7 @@ import BuildSettings._
 import sbtassembly.AssemblyPlugin.autoImport.assemblyMergeStrategy
 import com.typesafe.sbt.packager.docker._
 
-lazy val commonDeps = Seq(logback, scalaTest, scalaCheck, akkaHttpSprayJson, testContainers, apacheCommonsIO, log4jToSlf4j, kubernetesApi)
+lazy val commonDeps = Seq(logback, scalaTest, scalaCheck, akkaHttpSprayJson, testContainers, apacheCommonsIO, log4jToSlf4j, kubernetesApi, scalaLogging)
 
 lazy val sparkDeps =
   Seq(
@@ -33,7 +33,6 @@ lazy val akkaDeps = Seq(
   akkaCors,
   mskdriver,
   akkaKafkaStreams,
-  embeddedKafka,
   alpakkaS3,
   akkaQuartzScheduler,
   alpakkaFile
@@ -47,8 +46,7 @@ lazy val akkaPersistenceDeps =
   akkaPersistenceQuery,
   akkaClusterShardingTyped,
   akkaPersistenceCassandra,
-  keyspacedriver,
-  cassandraLauncher
+  keyspacedriver
 )
 
 lazy val akkaHttpDeps =
```
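The single addition to `commonDeps`, `scalaLogging`, presumably refers to Lightbend's scala-logging, a thin macro wrapper over SLF4J that the existing `logback` dependency backs at runtime. A hypothetical usage sketch (object name is mine):

```scala
import com.typesafe.scalalogging.LazyLogging

object LoggingSketch extends App with LazyLogging {
  // logger is provided by LazyLogging and delegates to SLF4J/Logback.
  logger.info("commonDeps now pulls in scala-logging alongside logback")
}
```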

common/src/main/scala/hmda/persistence/util/CassandraUtil.scala

Lines changed: 0 additions & 26 deletions
This file was deleted.

common/src/main/scala/hmda/util/RealTimeConfig.scala

Lines changed: 37 additions & 31 deletions
```diff
@@ -13,37 +13,43 @@ class RealTimeConfig(val cmName: String, val ns: String) {
   private var currentConfig: Option[Config] = None
   private var factory: Option[SharedInformerFactory] = None
 
-  try {
-    val client = io.kubernetes.client.util.Config.defaultClient()
-    val api = new CoreV1Api(client)
-    factory = Option(new SharedInformerFactory(client))
-    val informer = factory.get.sharedIndexInformerFor((params: CallGeneratorParams) => {
-      api.listNamespacedConfigMapCall(
-        ns, null, null, null, s"metadata.name=$cmName", null, null, params.resourceVersion, null, null, params.timeoutSeconds, params.watch, null)
-    }, classOf[V1ConfigMap], classOf[V1ConfigMapList])
-    informer.addEventHandler(new ResourceEventHandler[V1ConfigMap] {
-      override def onAdd(obj: V1ConfigMap): Unit = {
-        log.debug("cm added: {}", obj)
-        setConfig(obj)
-      }
-
-      override def onUpdate(oldObj: V1ConfigMap, newObj: V1ConfigMap): Unit = {
-        log.debug("cm updated: {}", newObj)
-        setConfig(newObj)
-      }
-
-      override def onDelete(obj: V1ConfigMap, deletedFinalStateUnknown: Boolean): Unit = log.warn("cm deleted: {}, deleteStateUnknown: {}", obj, deletedFinalStateUnknown)
-    })
-
-    factory.get.startAllRegisteredInformers()
-    setConfig(api.readNamespacedConfigMap(cmName, ns, null))
-  } catch {
-    case e: ApiException =>
-      log.error(s"Failed to setup informer, most likely role permission issues. ${e.getResponseBody}", e)
-      factory.get.stopAllRegisteredInformers()
-    case e: Throwable =>
-      log.error(s"Failed to setup informer", e)
-      factory.get.stopAllRegisteredInformers()
+  private val config = ConfigFactory.load()
+
+  private val runMode = if (config.hasPath("hmda.runtime.mode")) config.getString("hmda.runtime.mode") else "dev"
+
+  if (runMode == "kubernetes") {
+    try {
+      val client = io.kubernetes.client.util.Config.defaultClient()
+      val api = new CoreV1Api(client)
+      factory = Option(new SharedInformerFactory(client))
+      val informer = factory.get.sharedIndexInformerFor((params: CallGeneratorParams) => {
+        api.listNamespacedConfigMapCall(
+          ns, null, null, null, s"metadata.name=$cmName", null, null, params.resourceVersion, null, null, params.timeoutSeconds, params.watch, null)
+      }, classOf[V1ConfigMap], classOf[V1ConfigMapList])
+      informer.addEventHandler(new ResourceEventHandler[V1ConfigMap] {
+        override def onAdd(obj: V1ConfigMap): Unit = {
+          log.debug("cm added: {}", obj)
+          setConfig(obj)
+        }
+
+        override def onUpdate(oldObj: V1ConfigMap, newObj: V1ConfigMap): Unit = {
+          log.debug("cm updated: {}", newObj)
+          setConfig(newObj)
+        }
+
+        override def onDelete(obj: V1ConfigMap, deletedFinalStateUnknown: Boolean): Unit = log.warn("cm deleted: {}, deleteStateUnknown: {}", obj, deletedFinalStateUnknown)
+      })
+
+      factory.get.startAllRegisteredInformers()
+      setConfig(api.readNamespacedConfigMap(cmName, ns, null))
+    } catch {
+      case e: ApiException =>
+        log.error(s"Failed to setup informer, most likely role permission issues. ${e.getResponseBody}", e)
+        factory.get.stopAllRegisteredInformers()
+      case e: Throwable =>
+        log.error(s"Failed to setup informer", e)
+        factory.get.stopAllRegisteredInformers()
+    }
   }
 
   private def setConfig(cm: V1ConfigMap): Unit = {
```

common/src/test/scala/hmda/persistence/AkkaCassandraPersistenceSpec.scala

Lines changed: 3 additions & 7 deletions
```diff
@@ -2,18 +2,16 @@ package hmda.persistence
 
 import java.time.Instant
 import java.util.concurrent.TimeUnit
-
 import akka.actor
 import akka.actor.testkit.typed.scaladsl.TestProbe
 import akka.actor.typed.scaladsl.Behaviors
 import akka.actor.typed.scaladsl.adapter._
-import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, TypedActorContext }
+import akka.actor.typed.{ActorRef, ActorSystem, Behavior, TypedActorContext}
 import akka.persistence.typed.PersistenceId
 import akka.persistence.typed.scaladsl.EventSourcedBehavior.CommandHandler
-import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }
-import hmda.persistence.util.CassandraUtil
+import akka.persistence.typed.scaladsl.{Effect, EventSourcedBehavior}
 import org.scalacheck.Gen
-import org.scalatest.{ BeforeAndAfterAll, WordSpec }
+import org.scalatest.{BeforeAndAfterAll, WordSpec}
 
 import scala.concurrent.duration._
 
@@ -28,13 +26,11 @@ abstract class AkkaCassandraPersistenceSpec extends WordSpec with BeforeAndAfter
   implicit val typedSystem: ActorSystem[_]
 
   override def beforeAll(): Unit = {
-    CassandraUtil.startEmbeddedCassandra()
     awaitPersistenceInit()
     super.beforeAll()
   }
 
   override def afterAll(): Unit = {
-    CassandraUtil.shutdown()
     system.terminate()
     super.afterAll()
   }
```

common/src/test/scala/hmda/utils/EmbeddedCassandra.scala

Lines changed: 0 additions & 17 deletions
This file was deleted.

common/src/test/scala/hmda/utils/EmbeddedKafka.scala

Lines changed: 0 additions & 22 deletions
This file was deleted.

data-browser/src/main/scala/hmda/dataBrowser/services/S3FileService.scala

Lines changed: 2 additions & 2 deletions
```diff
@@ -1,8 +1,8 @@
 package hmda.dataBrowser.services
 
 import akka.NotUsed
+import akka.actor.typed.ActorSystem
 import akka.http.scaladsl.model.ContentTypes
-import akka.stream.Materializer
 import akka.stream.alpakka.s3.S3Headers
 import akka.stream.alpakka.s3.scaladsl.S3
 import akka.stream.scaladsl.{ Sink, Source }
@@ -16,7 +16,7 @@ import org.slf4j.LoggerFactory
 
 // $COVERAGE-OFF$
 // All this does is use the Alpakka S3 APIs
-class S3FileService(implicit mat: Materializer) extends FileService with Settings {
+class S3FileService(implicit system: ActorSystem[Nothing]) extends FileService with Settings {
 
   private final val log = LoggerFactory.getLogger(getClass)
```
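The constructor change works because a typed `ActorSystem` is a `ClassicActorSystemProvider`, so Akka Streams can resolve a `Materializer` from it implicitly and the separately injected `Materializer` parameter becomes redundant. A minimal, self-contained sketch of that mechanism (not the data-browser code; the stream and names are made up):

```scala
import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString

import scala.concurrent.Await
import scala.concurrent.duration._

object TypedSystemStreamSketch extends App {
  // The implicit typed system is enough for runWith: Akka Streams resolves a
  // Materializer from any ClassicActorSystemProvider in implicit scope.
  implicit val system: ActorSystem[Nothing] = ActorSystem(Behaviors.empty, "stream-sketch")

  val totalBytes = Source(List("alpakka", "s3", "sketch"))
    .map(ByteString(_))
    .runWith(Sink.fold(0L)((acc, bs) => acc + bs.length))

  println(s"streamed ${Await.result(totalBytes, 5.seconds)} bytes")
  system.terminate()
}
```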

hmda/src/main/scala/hmda/HmdaPlatform.scala

Lines changed: 2 additions & 17 deletions
```diff
@@ -12,10 +12,8 @@ import akka.stream.Materializer
 import com.typesafe.config.ConfigFactory
 import hmda.api.HmdaApi
 import hmda.persistence.HmdaPersistence
-import hmda.persistence.util.CassandraUtil
 import hmda.publication.{HmdaPublication, KafkaUtils}
 import hmda.validation.HmdaValidation
-import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
 import org.slf4j.LoggerFactory
 
 // $COVERAGE-OFF$
@@ -72,21 +70,8 @@ object HmdaPlatform extends App {
   implicit val mat = Materializer(system)
   implicit val cluster = Cluster(system)
 
-  if (runtimeMode == "dcos" || runtimeMode == "kubernetes" || runtimeMode == "docker-compose" || runtimeMode == "kind") {
-    ClusterBootstrap(system).start()
-    AkkaManagement(system).start()
-  }
-
-  if (runtimeMode == "dev") {
-    CassandraUtil.startEmbeddedCassandra()
-    AkkaManagement(system).start()
-    implicit val embeddedKafkaConfig: EmbeddedKafkaConfig = EmbeddedKafkaConfig(
-      sys.env.getOrElse("HMDA_LOCAL_KAFKA_PORT", "9092").toInt,
-      sys.env.getOrElse("HMDA_LOCAL_ZK_PORT", "2182").toInt,
-      Map("offsets.topic.replication.factor" -> "1", "zookeeper.connection.timeout.ms" -> "20000")
-    )
-    EmbeddedKafka.start()
-  }
+  ClusterBootstrap(system).start()
+  AkkaManagement(system).start()
 
   // TODO: Fix this as initializing it here is not a good idea, this should be initialized in HmdaPersistence and passed into HmdaValidationError
   val stringKafkaProducer = KafkaUtils.getStringKafkaProducer(system)
```
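With `EmbeddedKafka.start()` gone, every runtime mode now talks to an externally provided broker, locally the docker-compose `kafka` service. The platform obtains its producer through `KafkaUtils.getStringKafkaProducer`; the sketch below only illustrates the plain Kafka client setup involved, and the broker env-var name and topic are assumptions:

```scala
import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

object KafkaProducerSketch extends App {
  val props = new Properties()
  // Assumed env var name; defaults to the docker-compose broker on localhost.
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, sys.env.getOrElse("KAFKA_CLUSTER_HOSTS", "localhost:9092"))
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)

  val producer = new KafkaProducer[String, String](props)
  // Hypothetical topic name, purely for the sketch; .get() blocks until the broker acks.
  producer.send(new ProducerRecord[String, String]("example-topic", "key", "value")).get()
  producer.close()
}
```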
