How to resolve psycopg2.OperationalError: SCRAM authentication requires libpq version 10 or above?

Problem description

I am trying to run the connection code below against PostgreSQL, but I get an OperationalError about SCRAM authentication.

This is the traceback shown in the console:

Py4JJavaError                             Traceback (most recent call last)
<command-780007467828035> in <module>
 ----> 1 df2.show()

 /databricks/spark/python/pyspark/sql/dataframe.py in show(self,n,truncate,vertical)
382         """
383         if isinstance(truncate,bool) and truncate:
--> 384             print(self._jdf.showString(n,20,vertical))
385         else:
386             print(self._jdf.showString(n,int(truncate),vertical))

/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self,*args)
1255         answer = self.gateway_client.send_command(command)
1256         return_value = get_return_value(
-> 1257             answer,self.gateway_client,self.target_id,self.name)
1258 
1259         for temp_arg in temp_args:

/databricks/spark/python/pyspark/sql/utils.py in deco(*a,**kw)
 61     def deco(*a,**kw):
 62         try:
 ---> 63             return f(*a,**kw)
 64         except py4j.protocol.Py4JJavaError as e:
 65             s = e.java_exception.toString()

 /databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer,gateway_client,target_id,name)
326                 raise Py4JJavaError(
327                     "An error occurred while calling {0}{1}{2}.\n".
--> 328                     format(target_id,".",name),value)
329             else:
330                 raise Py4JError(

Py4JJavaError: An error occurred while calling o804.showString.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 231.0 failed 4 times, most recent failure: Lost task 0.3 in stage 231.0 (TID 6106, 10.52.98.16, executor 0): com.databricks.sql.io.FileReadException: Error while reading file dbfs:/user/hive/warehouse/p_suggestedpricefornegotiation/part-00000-e64f3491-8afe-44a9-a55d-3495bc7a1395-c000.snappy.parquet. A file referenced in the transaction log cannot be found. This occurs when data has been manually deleted from the file system rather than using the table `DELETE` statement. For more information, see https://docs.microsoft.com/azure/databricks/delta/delta-intro#frequently-asked-questions
at   org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anon$2.logFileNameAndThrow(FileScanRDD.scala:331)
at  org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anon$2.getNext(FileScanRDD.scala:297)
at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:73)
at  org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anonfun$prepareNextFile$1.apply(FileScanRDD.scala:463)
at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anonfun$prepareNextFile$1.apply(FileScanRDD.scala:451)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
at  org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable$$anonfun$run$1.apply$mcV$sp(SparkThreadLocalForwardingThreadPoolExecutor.scala:104)
at org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable$$anonfun$run$1.apply(SparkThreadLocalForwardingThreadPoolExecutor.scala:104)
at org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable$$anonfun$run$1.apply(SparkThreadLocalForwardingThreadPoolExecutor.scala:104)
at org.apache.spark.util.threads.SparkThreadLocalCapturingHelper$class.runWithCaptured(SparkThreadLocalForwardingThreadPoolExecutor.scala:68)
at org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable.runWithCaptured(SparkThreadLocalForwardingThreadPoolExecutor.scala:101)
at org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable.run(SparkThreadLocalForwardingThreadPoolExecutor.scala:104)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
 Caused by: java.io.FileNotFoundException:  dbfs:/user/hive/warehouse/p_suggestedpricefornegotiation/part-00000-e64f3491-8afe-44a9-a55d-3495bc7a1395-c000.snappy.parquet
at  com.databricks.backend.daemon.data.client.DatabricksFileSystemV2$$anonfun$getFileStatus$1$$anonfun$apply$15.apply(DatabricksFileSystemV2.scala:770)
at com.databricks.backend.daemon.data.client.DatabricksFileSystemV2$$anonfun$getFileStatus$1$$anonfun$apply$15.apply(DatabricksFileSystemV2.scala:756)
at com.databricks.s3a.S3AExeceptionUtils$.convertAWSExceptionToJavaIOException(DatabricksstreamUtils.scala:108)
at com.databricks.backend.daemon.data.client.DatabricksFileSystemV2$$anonfun$getFileStatus$1.apply(DatabricksFileSystemV2.scala:756)
at com.databricks.backend.daemon.data.client.DatabricksFileSystemV2$$anonfun$getFileStatus$1.apply(DatabricksFileSystemV2.scala:756)
at com.databricks.logging.UsageLogging$$anonfun$recordOperation$1.apply(UsageLogging.scala:428)
at com.databricks.logging.UsageLogging$$anonfun$withAttributionContext$1.apply(UsageLogging.scala:238)
at scala.util.DynamicVariable.withValue(DynamicVariable.scala:58)
at com.databricks.logging.UsageLogging$class.withAttributionContext(UsageLogging.scala:233)
at com.databricks.backend.daemon.data.client.DatabricksFileSystemV2.withAttributionContext(DatabricksFileSystemV2.scala:450)
at com.databricks.logging.UsageLogging$class.withAttributionTags(UsageLogging.scala:275)
at  com.databricks.backend.daemon.data.client.DatabricksFileSystemV2.withAttributionTags(DatabricksFileSystemV2.scala:450)
at com.databricks.logging.UsageLogging$class.recordOperation(UsageLogging.scala:409)
at  com.databricks.backend.daemon.data.client.DatabricksFileSystemV2.recordOperation(DatabricksFileSystemV2.scala:450)
at com.databricks.backend.daemon.data.client.DatabricksFileSystemV2.getFileStatus(DatabricksFileSystemV2.scala:755)
at com.databricks.backend.daemon.data.client.DatabricksFileSystem.getFileStatus(DatabricksFileSystem.scala:201)
at  com.databricks.spark.metrics.FileSystemWithMetrics.getFileStatus(FileSystemWithMetrics.scala:295)
at org.apache.parquet.hadoop.util.HadoopInputFile.fromPath(HadoopInputFile.java:39)
at org.apache.parquet.hadoop.ParquetFileReader.readFooter(ParquetFileReader.java:452)
at com.databricks.sql.io.parquet.CachingParquetFileReader.readFooter(CachingParquetFileReader.java:366)
at org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase.prepare(SpecificParquetRecordReaderBase.java:128)
at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anonfun$buildreaderWithPartitionValues$1.apply(ParquetFileFormat.scala:477)
at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anonfun$buildreaderWithPartitionValues$1.apply(ParquetFileFormat.scala:390)
at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anon$2.getNext(FileScanRDD.scala:281)
... 14 more

Driver stacktrace:
at   org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndindependentStages(DAGScheduler.scala:2362)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2350)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2349)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2349)
at  org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1102)
at  org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1102)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1102)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2582)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2529)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2517)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:897)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2280)
at org.apache.spark.sql.execution.collect.Collector.runSparkJobs(Collector.scala:270)
at org.apache.spark.sql.execution.collect.Collector.collect(Collector.scala:280)
at org.apache.spark.sql.execution.collect.Collector$.collect(Collector.scala:80)
at org.apache.spark.sql.execution.collect.Collector$.collect(Collector.scala:86)
at  org.apache.spark.sql.execution.ResultCacheManager.getorComputeResult(ResultCacheManager.scala:508)
at org.apache.spark.sql.execution.CollectLimitExec.executeCollectResult(limit.scala:57)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectResult(Dataset.scala:2905)
at  org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3517)
at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2634)
at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2634)
at org.apache.spark.sql.Dataset$$anonfun$54.apply(Dataset.scala:3501)
at org.apache.spark.sql.Dataset$$anonfun$54.apply(Dataset.scala:3496)
at  org.apache.spark.sql.execution.sqlExecution$$anonfun$withCustomExecutionEnv$1$$anonfun$apply$1.apply(sqlExecution.scala:112)
at org.apache.spark.sql.execution.sqlExecution$.withsqlConfPropagated(sqlExecution.scala:232)
at  org.apache.spark.sql.execution.sqlExecution$$anonfun$withCustomExecutionEnv$1.apply(sqlExecution.scala:98)
at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:835)
at org.apache.spark.sql.execution.sqlExecution$.withCustomExecutionEnv(sqlExecution.scala:74)
at org.apache.spark.sql.execution.sqlExecution$.withNewExecutionId(sqlExecution.scala:184)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$withAction(Dataset.scala:3496)
at org.apache.spark.sql.Dataset.head(Dataset.scala:2634)
at org.apache.spark.sql.Dataset.take(Dataset.scala:2848)
at org.apache.spark.sql.Dataset.getRows(Dataset.scala:279)
at org.apache.spark.sql.Dataset.showString(Dataset.scala:316)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:380)
at py4j.Gateway.invoke(Gateway.java:295)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:251)
at java.lang.Thread.run(Thread.java:748)
Caused by: com.databricks.sql.io.FileReadException: Error while reading file dbfs:/user/hive/warehouse/p_suggestedpricefornegotiation/part-00000-e64f3491-8afe-44a9-a55d-3495bc7a1395-c000.snappy.parquet. A file referenced in the transaction log cannot be found. This occurs when data has been manually deleted from the file system rather than using the table `DELETE` statement. For more information, see https://docs.microsoft.com/azure/databricks/delta/delta-intro#frequently-asked-questions
at   org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anon$2.logFileNameAndThrow(FileScanRDD.scala:331)
at  org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anon$2.getNext(FileScanRDD.scala:297)
at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:73)
at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anonfun$prepareNextFile$1.apply(FileScanRDD.scala:463)
at  org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anonfun$prepareNextFile$1.apply(FileScanRDD.scala:451)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
at org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable$$anonfun$run$1.apply$mcV$sp(SparkThreadLocalForwardingThreadPoolExecutor.scala:104)
at org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable$$anonfun$run$1.apply(SparkThreadLocalForwardingThreadPoolExecutor.scala:104)
at org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable$$anonfun$run$1.apply(SparkThreadLocalForwardingThreadPoolExecutor.scala:104)
at  org.apache.spark.util.threads.SparkThreadLocalCapturingHelper$class.runWithCaptured(SparkThreadLocalForwardingThreadPoolExecutor.scala:68)
at org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable.runWithCaptured(SparkThreadLocalForwardingThreadPoolExecutor.scala:101)
at org.apache.spark.util.threads.SparkThreadLocalCapturingRunnable.run(SparkThreadLocalForwardingThreadPoolExecutor.scala:104)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
... 1 more
Caused by: java.io.FileNotFoundException:   dbfs:/user/hive/warehouse/p_suggestedpricefornegotiation/part-00000-e64f3491-8afe-44a9-a55d-3495bc7a1395-c000.snappy.parquet
at   com.databricks.backend.daemon.data.client.DatabricksFileSystemV2$$anonfun$getFileStatus$1$$anonfun$apply$15.apply(DatabricksFileSystemV2.scala:770)
at   com.databricks.backend.daemon.data.client.DatabricksFileSystemV2$$anonfun$getFileStatus$1$$anonfun$apply$15.apply(DatabricksFileSystemV2.scala:756)
at  com.databricks.s3a.S3AExeceptionUtils$.convertAWSExceptionToJavaIOException(DatabricksstreamUtils.scala:108)
at  com.databricks.backend.daemon.data.client.DatabricksFileSystemV2$$anonfun$getFileStatus$1.apply(DatabricksFileSystemV2.scala:756)
at  com.databricks.backend.daemon.data.client.DatabricksFileSystemV2$$anonfun$getFileStatus$1.apply(DatabricksFileSystemV2.scala:756)
at com.databricks.logging.UsageLogging$$anonfun$recordOperation$1.apply(UsageLogging.scala:428)
at  com.databricks.logging.UsageLogging$$anonfun$withAttributionContext$1.apply(UsageLogging.scala:238)
at scala.util.DynamicVariable.withValue(DynamicVariable.scala:58)
at com.databricks.logging.UsageLogging$class.withAttributionContext(UsageLogging.scala:233)
at   com.databricks.backend.daemon.data.client.DatabricksFileSystemV2.withAttributionContext(DatabricksFileSystemV2.scala:450)
at com.databricks.logging.UsageLogging$class.withAttributionTags(UsageLogging.scala:275)
at  com.databricks.backend.daemon.data.client.DatabricksFileSystemV2.withAttributionTags(DatabricksFileSystemV2.scala:450)
at com.databricks.logging.UsageLogging$class.recordOperation(UsageLogging.scala:409)
at com.databricks.backend.daemon.data.client.DatabricksFileSystemV2.recordOperation(DatabricksFileSystemV2.scala:450)
at com.databricks.backend.daemon.data.client.DatabricksFileSystemV2.getFileStatus(DatabricksFileSystemV2.scala:755)
at  com.databricks.backend.daemon.data.client.DatabricksFileSystem.getFileStatus(DatabricksFileSystem.scala:201)
at  com.databricks.spark.metrics.FileSystemWithMetrics.getFileStatus(FileSystemWithMetrics.scala:295)
at org.apache.parquet.hadoop.util.HadoopInputFile.fromPath(HadoopInputFile.java:39)
at org.apache.parquet.hadoop.ParquetFileReader.readFooter(ParquetFileReader.java:452)
at com.databricks.sql.io.parquet.CachingParquetFileReader.readFooter(CachingParquetFileReader.java:366)
at org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase.prepare(SpecificParquetRecordReaderBase.java:128)
at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anonfun$buildreaderWithPartitionValues$1.apply(ParquetFileFormat.scala:477)
at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anonfun$buildreaderWithPartitionValues$1.apply(ParquetFileFormat.scala:390)
at   org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1$$anon$2.getNext(FileScanRDD.scala:281)
... 14 more

Connection string code:

import psycopg2

DB_session = psycopg2.connect(database="postgres", user="postgres", password="test",
                              host="some remote URL", port="1534")
DB_cursor = DB_session.cursor()
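
The error in the title comes from the libpq client library that psycopg2 is linked against: scram-sha-256 authentication was only added in libpq 10, so a build linked against a 9.x libpq fails before it even reaches the server. Below is a minimal diagnostic sketch, using only attributes psycopg2 itself exposes, to confirm which libpq is actually in use:

import psycopg2
import psycopg2.extensions

# Version of libpq that psycopg2 was compiled against, e.g. 90624 means 9.6.24
print("compile-time libpq:", psycopg2.__libpq_version__)

# Version of libpq loaded at runtime (may differ from the compile-time one)
runtime_libpq = psycopg2.extensions.libpq_version()
print("runtime libpq:", runtime_libpq)

# Anything below 100000 (libpq < 10) cannot perform scram-sha-256 authentication
if runtime_libpq < 100000:
    print("libpq is too old for SCRAM; upgrade it or install psycopg2-binary")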

I need connection code that works on both Python 2.7 and 3.6. Because of access restrictions I cannot view or modify the PostgreSQL server configuration. A connection string that uses md5 instead of SCRAM authentication would also help.
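
For reference, the keyword-argument call above can also be written as a single libpq DSN string. This is only a sketch of the equivalent form ("some remote URL" is still the placeholder from the question); it works on both Python 2.7 and 3.6 but does not remove the libpq 10 requirement:

import psycopg2

# Same connection expressed as a libpq key=value DSN string
# (values containing spaces must be single-quoted; the database key is dbname)
dsn = "dbname=postgres user=postgres password=test host='some remote URL' port=1534"
DB_session = psycopg2.connect(dsn)
DB_cursor = DB_session.cursor()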

Solution

Install psycopg2 from the pip binary package psycopg2-binary rather than building it from source. The wheel bundles the dependencies (its own recent libpq) needed to support SCRAM authentication.
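
A sketch of the full workaround, assuming you are allowed to change the packages in your Python environment: remove the source-built package with pip uninstall psycopg2, install the wheel with pip install psycopg2-binary, and then confirm the bundled libpq before retrying the original connection:

import psycopg2
import psycopg2.extensions

# psycopg2-binary ships its own libpq; this should now report 100000 or higher
assert psycopg2.extensions.libpq_version() >= 100000, "libpq is still older than 10"

# The original connection code should now pass SCRAM authentication unchanged
DB_session = psycopg2.connect(database="postgres", user="postgres", password="test",
                              host="some remote URL", port="1534")
DB_cursor = DB_session.cursor()
DB_cursor.execute("SELECT version()")
print(DB_cursor.fetchone())
DB_cursor.close()
DB_session.close()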