Commit
Service name override for DSM checkpoints in Spark context
Showing 15 changed files with 363 additions and 138 deletions.
89 changes: 89 additions & 0 deletions
...ava-agent/instrumentation/spark/spark_2.12/src/test/groovy/SparkStreamingKafkaTest.groovy
@@ -0,0 +1,89 @@
import datadog.trace.agent.test.AgentTestRunner
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.spark.api.java.function.VoidFunction2
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.Trigger
import org.junit.Rule
import org.springframework.kafka.core.DefaultKafkaProducerFactory
import org.springframework.kafka.test.EmbeddedKafkaBroker
import org.springframework.kafka.test.rule.EmbeddedKafkaRule
import org.springframework.kafka.test.utils.KafkaTestUtils

class SparkStreamingKafkaTest extends AgentTestRunner {
  static final SOURCE_TOPIC = "source"
  static final SINK_TOPIC = "sink"

  @Override
  boolean isDataStreamsEnabled() {
    return true
  }

  @Rule
  EmbeddedKafkaRule kafkaRule = new EmbeddedKafkaRule(1, false, 1, SOURCE_TOPIC, SINK_TOPIC)
  EmbeddedKafkaBroker embeddedKafka = kafkaRule.embeddedKafka

  @Override
  void configurePreAgent() {
    super.configurePreAgent()
    injectSysConfig("dd.integration.spark.enabled", "true")
    injectSysConfig("dd.integration.kafka.enabled", "true")
  }

  def "test dsm checkpoints are correctly set"() {
    setup:
    def appName = "test-app"
    def sparkSession = SparkSession.builder()
      .config("spark.master", "local[2]")
      .config("spark.driver.bindAddress", "localhost")
      .appName(appName)
      .getOrCreate()

    def producerProps = KafkaTestUtils.producerProps(embeddedKafka.getBrokersAsString())
    def producer = new DefaultKafkaProducerFactory<Integer, String>(producerProps).createProducer()

    when:
    for (int i = 0; i < 100; i++) {
      producer.send(new ProducerRecord<>(SOURCE_TOPIC, i, i.toString()))
    }
    producer.flush()

    def df = sparkSession
      .readStream()
      .format("kafka")
      .option("kafka.bootstrap.servers", embeddedKafka.getBrokersAsString())
      .option("startingOffsets", "earliest")
      .option("failOnDataLoss", "false")
      .option("subscribe", SOURCE_TOPIC)
      .load()

    def query = df
      .selectExpr("CAST(key AS STRING) as key", "CAST(value AS STRING) as value")
      .writeStream()
      .format("kafka")
      .option("kafka.bootstrap.servers", embeddedKafka.getBrokersAsString())
      .option("checkpointLocation", "/tmp/" + System.currentTimeMillis().toString())
      .option("topic", SINK_TOPIC)
      .trigger(Trigger.Once())
      .foreachBatch(new VoidFunction2<Dataset<Row>, Long>() {
        @Override
        void call(Dataset<Row> rowDataset, Long aLong) throws Exception {
          rowDataset.show()
          rowDataset.write()
        }
      })
      .start()

    query.processAllAvailable()

    then:
    query.stop()
    producer.close()

    // check that checkpoints were written with a service name override equal to the Spark app name
    assert TEST_DATA_STREAMS_WRITER.payloads.size() > 0
    assert TEST_DATA_STREAMS_WRITER.services.size() == 1
    assert TEST_DATA_STREAMS_WRITER.services.get(0) == appName
  }
}
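
The test above asserts that the DSM checkpoints are written with a service name override equal to the Spark application name. Below is a minimal sketch of the same resolution outside the test harness, using the SparkConfUtils helper introduced later in this commit; it assumes the tracer is configured to use the Spark app name as the service (Config.get().useSparkAppNameAsService() returns true) and that no service name was set explicitly by the user. The class and app names are illustrative only.

import datadog.trace.instrumentation.spark.SparkConfUtils;
import org.apache.spark.SparkConf;

public class ServiceNameOverrideSketch {
  public static void main(String[] args) {
    // A plain (non-Databricks) Spark application: no spark.databricks.* properties are set.
    SparkConf conf = new SparkConf()
        .setMaster("local[2]")
        .setAppName("test-app");

    // With app-name-as-service enabled and no user-defined service name,
    // the override falls through to the Spark application name.
    String override = SparkConfUtils.getServiceNameOverride(conf);
    System.out.println(override); // expected: "test-app"
  }
}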
100 changes: 100 additions & 0 deletions
...strumentation/spark/src/main/java/datadog/trace/instrumentation/spark/SparkConfUtils.java
@@ -0,0 +1,100 @@
package datadog.trace.instrumentation.spark;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import datadog.trace.api.Config;
import de.thetaphi.forbiddenapis.SuppressForbidden;
import org.apache.spark.SparkConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SparkConfUtils {
  private static final ObjectMapper objectMapper = new ObjectMapper();
  private static final Logger log = LoggerFactory.getLogger(SparkConfUtils.class);

  public static boolean getIsRunningOnDatabricks(SparkConf sparkConf) {
    return sparkConf.contains("spark.databricks.sparkContextId");
  }

  public static String getDatabricksClusterName(SparkConf sparkConf) {
    return sparkConf.get("spark.databricks.clusterUsageTags.clusterName", null);
  }

  public static String getDatabricksServiceName(SparkConf conf, String databricksClusterName) {
    if (Config.get().isServiceNameSetByUser()) {
      return null;
    }

    String serviceName = null;
    String runName = getDatabricksRunName(conf);
    if (runName != null) {
      serviceName = "databricks.job-cluster." + runName;
    } else if (databricksClusterName != null) {
      serviceName = "databricks.all-purpose-cluster." + databricksClusterName;
    }

    return serviceName;
  }

  public static String getSparkServiceName(SparkConf conf, boolean isRunningOnDatabricks) {
    // If the config is not set, or we are running on Databricks, do not change the service name
    if (!Config.get().useSparkAppNameAsService() || isRunningOnDatabricks) {
      return null;
    }

    // Keep the service name set by the user, unless it is just "spark" or "hadoop", which can be set by USM
    String serviceName = Config.get().getServiceName();
    if (Config.get().isServiceNameSetByUser()
        && !"spark".equals(serviceName)
        && !"hadoop".equals(serviceName)) {
      log.debug("Service '{}' explicitly set by user, not using the application name", serviceName);
      return null;
    }

    String sparkAppName = conf.get("spark.app.name", null);
    if (sparkAppName != null) {
      log.info("Using Spark application name '{}' as the Datadog service name", sparkAppName);
    }

    return sparkAppName;
  }

  public static String getServiceNameOverride(SparkConf conf) {
    boolean isRunningOnDatabricks = getIsRunningOnDatabricks(conf);
    String databricksClusterName = getDatabricksClusterName(conf);
    String databricksServiceName = getDatabricksServiceName(conf, databricksClusterName);
    String sparkServiceName = getSparkServiceName(conf, isRunningOnDatabricks);

    return databricksServiceName != null ? databricksServiceName : sparkServiceName;
  }

  private static String getDatabricksRunName(SparkConf conf) {
    String allTags = conf.get("spark.databricks.clusterUsageTags.clusterAllTags", null);
    if (allTags == null) {
      return null;
    }

    try {
      // Use the Jackson JSON library bundled with Spark
      // https://mvnrepository.com/artifact/org.apache.spark/spark-core_2.12/3.5.0
      JsonNode jsonNode = objectMapper.readTree(allTags);

      for (JsonNode node : jsonNode) {
        String key = node.get("key").asText();
        if ("RunName".equals(key)) {
          // Databricks jobs launched by Azure Data Factory have a UUID at the end of the name
          return removeUuidFromEndOfString(node.get("value").asText());
        }
      }
    } catch (Exception ignored) {
    }

    return null;
  }

  @SuppressForbidden // called at most once per Spark application
  private static String removeUuidFromEndOfString(String input) {
    return input.replaceAll(
        "_[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", "");
  }
}
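
For the Databricks case, getServiceNameOverride builds the service from the RunName entry of spark.databricks.clusterUsageTags.clusterAllTags, stripping the UUID suffix that Azure Data Factory appends to run names. A hedged sketch of that path, again assuming no user-defined service name; the context id and tag values below are made up for illustration:

import datadog.trace.instrumentation.spark.SparkConfUtils;
import org.apache.spark.SparkConf;

public class DatabricksServiceNameSketch {
  public static void main(String[] args) {
    // Hypothetical conf mimicking a Databricks job cluster launched by Azure Data Factory.
    SparkConf conf = new SparkConf()
        .set("spark.databricks.sparkContextId", "1234567890")
        .set(
            "spark.databricks.clusterUsageTags.clusterAllTags",
            "[{\"key\":\"RunName\",\"value\":\"nightly_etl_0a1b2c3d-0a1b-2c3d-4e5f-0a1b2c3d4e5f\"}]");

    // RunName is parsed from the JSON tags and the trailing UUID is removed,
    // so the expected override is "databricks.job-cluster.nightly_etl".
    System.out.println(SparkConfUtils.getServiceNameOverride(conf));
  }
}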