Gremlin Spark Java Maven project - slow query response

Problem description

I have written a program to execute some Gremlin queries with the help of Spark (I use JanusGraph, with Cassandra and Solr as the storage and index backends), but the queries return results very slowly.

Most likely I have not configured something correctly.

Here is the code I am using.

Driver:

import org.apache.commons.configuration.Configuration;
import org.apache.tinkerpop.gremlin.spark.process.computer.SparkGraphComputer;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory;

public class SimpleApp {


    public static void main(String[] args) throws Exception {

        Configuration config = GraphTraversalProvider.makeLocal();
        Graph hadoopGraph = GraphFactory.open(config);


        Long totalVertices = hadoopGraph.traversal().withComputer(SparkGraphComputer.class).V().count().next();
        System.out.println("IT WORKED: " + totalVertices);


        hadoopGraph.close();

    }
}

The GraphTraversalProvider class:

import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.tinkerpop.gremlin.hadoop.Constants;

public class GraphTraversalProvider {

    private static final String KEY_SPACE = "janusgraph";
    private static final String CASSANDRA_ADDRESS = "localhost";

    public static Configuration makeLocal() {
        return make(true);
    }

    public static Configuration makeRemote() {
        return make(false);
    }

    private static Configuration make(boolean local) {

        final Configuration hadoopConfig = new BaseConfiguration();

        hadoopConfig.setProperty("gremlin.graph","org.apache.tinkerpop.gremlin.hadoop.structure.HadoopGraph");
        hadoopConfig.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER,"org.janusgraph.hadoop.formats.cql.CqlInputFormat");
        hadoopConfig.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER,"org.apache.hadoop.mapreduce.lib.output.NullOutputFormat");

        hadoopConfig.setProperty(Constants.GREMLIN_HADOOP_JARS_IN_DISTRIBUTED_CACHE, true);
        hadoopConfig.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION,"none");
        hadoopConfig.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,"output");
        hadoopConfig.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT,true);


        hadoopConfig.setProperty("janusgraphmr.ioformat.conf.storage.backend","cql");
        hadoopConfig.setProperty("janusgraphmr.ioformat.conf.storage.hostname",CASSANDRA_ADDRESS);
        hadoopConfig.setProperty("janusgraphmr.ioformat.conf.storage.port","9042");
        hadoopConfig.setProperty("janusgraphmr.ioformat.conf.storage.cassandra.keyspace",KEY_SPACE);


        hadoopConfig.setProperty("cassandra.input.partitioner.class","org.apache.cassandra.dht.Murmur3Partitioner");
        hadoopConfig.setProperty("cassandra.input.widerows",true);

        if (local) {
            hadoopConfig.setProperty("spark.master","local[*]"); // Run Spark locally with as many worker threads as logical cores on your machine.
        } else {
            hadoopConfig.setProperty("spark.master","spark://ADD_YOUR_URL");
        }
        hadoopConfig.setProperty("spark.executor.memory","2g");
        hadoopConfig.setProperty(Constants.SPARK_SERIALIZER,"org.apache.spark.serializer.KryoSerializer");
        hadoopConfig.setProperty("spark.kryo.registrator","org.janusgraph.hadoop.serialize.JanusGraphKryoRegistrator");


        hadoopConfig.setProperty("storage.hostname",CASSANDRA_ADDRESS);
        hadoopConfig.setProperty("storage.cassandra.keyspace",KEY_SPACE);


        return hadoopConfig;
    }


}

pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.ibc</groupId>
    <artifactId>sparkdemo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <janus.version>0.5.1</janus.version>
        <spark.version>2.4.0</spark.version>
        <gremlin.version>3.4.6</gremlin.version>
    </properties>


    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>

            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.2.2</version>
                <configuration>
                    <filters>
                        <filter>
                            <artifact>*:*</artifact>
                            <excludes>
                                <exclude>META-INF/*.SF</exclude>
                                <exclude>META-INF/*.DSA</exclude>
                                <exclude>META-INF/*.RSA</exclude>
                            </excludes>
                        </filter>
                    </filters>
                </configuration>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

    <dependencies>

        <dependency>
            <groupId>org.janusgraph</groupId>
            <artifactId>janusgraph-cassandra</artifactId>
            <version>${janus.version}</version>
        </dependency>

        <dependency>
            <groupId>org.janusgraph</groupId>
            <artifactId>janusgraph-hadoop</artifactId>
            <version>${janus.version}</version>
        </dependency>

        <dependency>
            <groupId>org.janusgraph</groupId>
            <artifactId>janusgraph-cql</artifactId>
            <version>${janus.version}</version>
        </dependency>

        <dependency>
            <groupId>org.janusgraph</groupId>
            <artifactId>janusgraph-solr</artifactId>
            <version>${janus.version}</version>
        </dependency>

        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
            <version>1.2.3</version>
        </dependency>

        <!-- GREMLIN -->
        <dependency>
            <groupId>org.apache.tinkerpop</groupId>
            <artifactId>spark-gremlin</artifactId>
            <version>${gremlin.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>com.fasterxml.jackson.core</groupId>
                    <artifactId>jackson-databind</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>com.google.guava</groupId>
                    <artifactId>guava</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.tinkerpop</groupId>
            <artifactId>hadoop-gremlin</artifactId>
            <version>${gremlin.version}</version>
        </dependency>

        <!-- SPARK -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.12</artifactId>
            <version>${spark.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>com.fasterxml.jackson.core</groupId>
                    <artifactId>jackson-databind</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>19.0</version>
        </dependency>



    </dependencies>


</project>

The output is as follows:


23:36:29,708 |-INFO in ch.qos.logback.classic.joran.action.RootLoggerAction - Setting level of ROOT logger to WARN
23:36:29,708 |-INFO in ch.qos.logback.core.joran.action.AppenderRefAction - Attaching appender named [CONSOLE] to Logger[ROOT]
23:36:29,708 |-INFO in ch.qos.logback.classic.joran.action.ConfigurationAction - End of configuration.
23:36:29,710 |-INFO in ch.qos.logback.classic.joran.JoranConfigurator@704d6e83 - Registering current configuration as safe fallback point

SLF4J: Actual binding is of type [ch.qos.logback.classic.util.ContextSelectorStaticBinder]
23:36:30.225 [main] WARN  o.a.t.g.s.p.c.SparkGraphComputer - class org.apache.hadoop.mapreduce.lib.output.NullOutputFormat does not implement PersistResultGraphAware and thus, persistence options are unknown -- assuming all options are possible
23:36:30.516 [SparkGraphComputer-boss] WARN  org.apache.spark.util.Utils - Your hostname, nchristidis-GL502VMK resolves to a loopback address: 127.0.1.1; using 192.168.1.12 instead (on interface wlp3s0)
23:36:30.516 [SparkGraphComputer-boss] WARN  org.apache.spark.util.Utils - Set SPARK_LOCAL_IP if you need to bind to another address
23:36:32.191 [SparkGraphComputer-boss] WARN  o.a.hadoop.util.NativeCodeLoader - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
23:36:33.279 [SparkGraphComputer-boss] WARN  o.a.t.g.s.p.c.SparkGraphComputer - HADOOP_GREMLIN_LIBS is not set -- proceeding regardless
23:36:35.266 [SparkGraphComputer-boss] WARN  com.datastax.driver.core.NettyUtil - Found Netty's native epoll transport in the classpath, but epoll is not available. Using NIO instead.

IT WORKED: 43
23:39:32.111 [Thread-3] WARN  org.apache.spark.SparkContext - Ignoring Exception while stopping SparkContext from shutdown hook
java.lang.NoSuchMethodError: io.netty.bootstrap.ServerBootstrap.config()Lio/netty/bootstrap/ServerBootstrapConfig;
    at org.apache.spark.network.server.TransportServer.close(TransportServer.java:154)
    at org.apache.spark.network.netty.NettyBlockTransferService.close(NettyBlockTransferService.scala:180)
    at org.apache.spark.storage.BlockManager.stop(BlockManager.scala:1615)
    at org.apache.spark.SparkEnv.stop(SparkEnv.scala:90)
    at org.apache.spark.SparkContext$$anonfun$stop$11.apply$mcV$sp(SparkContext.scala:1974)
    at org.apache.spark.util.Utils$.tryLogNonFatalError(Utils.scala:1340)
    at org.apache.spark.SparkContext.stop(SparkContext.scala:1973)
    at org.apache.spark.SparkContext$$anonfun$2.apply$mcV$sp(SparkContext.scala:575)
    at org.apache.spark.util.SparkShutdownHook.run(ShutdownHookManager.scala:216)
    at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ShutdownHookManager.scala:188)
    at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1$$anonfun$apply$mcV$sp$1.apply(ShutdownHookManager.scala:188)
    at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1$$anonfun$apply$mcV$sp$1.apply(ShutdownHookManager.scala:188)
    at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1945)
    at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1.apply$mcV$sp(ShutdownHookManager.scala:188)
    at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1.apply(ShutdownHookManager.scala:188)
    at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1.apply(ShutdownHookManager.scala:188)
    at scala.util.Try$.apply(Try.scala:192)
    at org.apache.spark.util.SparkShutdownHookManager.runAll(ShutdownHookManager.scala:188)
    at org.apache.spark.util.SparkShutdownHookManager$$anon$2.run(ShutdownHookManager.scala:178)
    at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)

Process finished with exit code 123

So I get the correct output, IT WORKED: 43 (43 is the total number of vertices), but it takes far too long.

This log message:

23:36:33.279 [SparkGraphComputer-boss] WARN  o.a.t.g.s.p.c.SparkGraphComputer - HADOOP_GREMLIN_LIBS is not set -- proceeding regardless

suggests that I most likely have not configured something correctly.

================================================================
Update: Tuesday, October 27

By submitting the program to a Spark cluster with one worker node, instead of running it from my local IDE, I cut the time significantly, from 6 minutes down to 3 minutes.
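
For reference, a minimal sketch of a driver pointed at the standalone cluster rather than local mode. The RemoteApp class name and the jar path are illustrative, and the spark://ADD_YOUR_URL placeholder in makeRemote() still needs a real master URL; spark.jars is a standard Spark property that distributes the application jar to the executors.

import org.apache.commons.configuration.Configuration;
import org.apache.tinkerpop.gremlin.spark.process.computer.SparkGraphComputer;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory;

public class RemoteApp {

    public static void main(String[] args) throws Exception {
        // Same config as above, but with spark.master pointing at the cluster.
        Configuration config = GraphTraversalProvider.makeRemote();
        // Ship the shaded application jar to the executors; the path is a placeholder.
        config.setProperty("spark.jars", "target/sparkdemo-1.0-SNAPSHOT.jar");

        Graph hadoopGraph = GraphFactory.open(config);
        Long totalVertices = hadoopGraph.traversal().withComputer(SparkGraphComputer.class).V().count().next();
        System.out.println("IT WORKED: " + totalVertices);
        hadoopGraph.close();
    }
}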


Solution

OLAP-based Gremlin traversals will be considerably slower than their standard OLTP counterparts, even for small datasets. There is a significant cost just in getting Spark ready to process the traversal. That overhead alone can easily make your OLAP query a minute worse than OLTP. In the comments on your question you explained that the query takes around six minutes. That seems a bit on the long side, but it might be within the realm of normal for OLAP, depending on your environment.
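
To see how much of those six minutes is fixed Spark overhead, the same count can be run as a plain OLTP traversal directly against JanusGraph, with no Spark involved. A minimal sketch, assuming the connection settings from the question; the class name is illustrative, and storage.cql.keyspace is the keyspace key the JanusGraph 0.5.x CQL backend expects:

import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphFactory;

public class OltpCountApp {

    public static void main(String[] args) throws Exception {
        // Open JanusGraph directly (OLTP) -- no Spark context to spin up.
        JanusGraph graph = JanusGraphFactory.build()
                .set("storage.backend", "cql")
                .set("storage.hostname", "localhost")
                .set("storage.cql.keyspace", "janusgraph")
                .open();
        GraphTraversalSource g = graph.traversal();

        // Same count as the OLAP version; on a 43-vertex graph this answers in seconds.
        System.out.println("OLTP count: " + g.V().count().next());

        graph.close();
    }
}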

Some graph systems optimize for an OLAP count() and can get you a really fast result, but since you tagged this question with "JanusGraph", I don't think that applies here.

You typically don't see the value of OLAP-based traversals until you start dealing with large-scale graphs. Compare OLAP to OLTP at the scale of 100+ million edges, and you probably won't mind waiting six minutes for an answer at all (since the OLTP traversal might never finish).

It's hard to say what you could do to make your current setup faster, because at this point you are really just proving that everything works. Now that you have a working model, I'd suggest the next step is to generate a much larger graph (perhaps 10 million vertices) and try the count again with a decently sized Spark cluster.
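
A minimal sketch of such a loader, assuming the same CQL connection settings as the question; the class name, vertex label, property, and batch size are all illustrative:

import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphFactory;

public class GraphLoaderApp {

    public static void main(String[] args) throws Exception {
        JanusGraph graph = JanusGraphFactory.build()
                .set("storage.backend", "cql")
                .set("storage.hostname", "localhost")
                .set("storage.cql.keyspace", "janusgraph")
                .open();
        GraphTraversalSource g = graph.traversal();

        final int total = 10_000_000; // target size suggested above
        final int batchSize = 10_000;

        for (int i = 0; i < total; i++) {
            g.addV("demo").property("idx", i).iterate();
            // Commit in batches; one giant transaction would exhaust memory.
            if (i % batchSize == 0) {
                graph.tx().commit();
            }
        }
        graph.tx().commit();
        graph.close();
    }
}

At that scale the fixed Spark startup cost becomes a small fraction of the total runtime, which is where the OLAP approach starts to pay off.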