Problem description
I am testing stream processing with Google Cloud Pub/Sub. Messages are sent from a publisher to a topic, read from Pub/Sub in Apache Beam, and inspected with beam.Map(print).
Reading messages from Pub/Sub works fine. However, after all the messages have been read, an error occurs.
- Code that publishes messages from the publisher to the topic
from google.cloud import pubsub_v1
from google.cloud import bigquery
import time

# TODO(developer)
project_id = "[your-project-id]"
topic_id = "[your-topic-id]"

# Construct a BigQuery client object.
client = bigquery.Client()

# Configure the batch to publish as soon as there are ten messages,
# one kilobyte of data, or one second has passed.
batch_settings = pubsub_v1.types.BatchSettings(
    max_messages=10,  # default 100
    max_bytes=1024,  # default 1 MB
    max_latency=1,  # default 10 ms
)

publisher = pubsub_v1.PublisherClient(batch_settings)
topic_path = publisher.topic_path(project_id, topic_id)
query = """
SELECT *
FROM `[bigquery-schema.bigquery-dataset.bigquery-tablename]`
LIMIT 20
"""
query_job = client.query(query)
# Resolve the publish future in a separate thread.
def callback(topic_message):
message_id = topic_message.result()
print(message_id)
print("The query data:")
for row in query_job:
data = u"category={},language={},count={}".format(row[0],row[1],row[2])
print(data)
data = data.encode("utf-8")
time.sleep(1)
topic_message = publisher.publish(topic_path,data=data)
topic_message.add_done_callback(callback)
print("Published messages with batch settings.")
- Apache Beam code [for reading and processing the data from Pub/Sub]
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START pubsub_to_gcs]
import argparse
import datetime
import json
import logging
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import apache_beam.transforms.window as window
pipeline_options = PipelineOptions(
    streaming=True,
    save_main_session=True,
    runner='DirectRunner',
    return_immediately=True,
    initial_rpc_timeout_millis=25000,
)
class GroupWindowsIntoBatches(beam.PTransform):
    """A composite transform that groups Pub/Sub messages based on publish
    time and outputs a list of dictionaries, where each contains one message
    and its publish timestamp.
    """

    def __init__(self, window_size):
        # Convert minutes into seconds.
        self.window_size = int(window_size * 60)

    def expand(self, pcoll):
        return (
            pcoll
            # Assigns window info to each Pub/Sub message based on its
            # publish timestamp.
            | "Window into Fixed Intervals"
            >> beam.WindowInto(window.FixedWindows(self.window_size))
            | "Add timestamps to messages" >> beam.ParDo(AddTimestamps())
            # Use a dummy key to group the elements in the same window.
            # Note that all the elements in one window must fit into memory
            # for this. If the windowed elements do not fit into memory,
            # please consider using `beam.util.BatchElements`.
            # https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html#apache_beam.transforms.util.BatchElements
            | "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
            | "Groupby" >> beam.GroupByKey()
            | "Abandon Dummy Key" >> beam.MapTuple(lambda _, val: val)
        )
class AddTimestamps(beam.DoFn):
    def process(self, element, publish_time=beam.DoFn.TimestampParam):
        """Processes each incoming windowed element by extracting the Pub/Sub
        message and its publish timestamp into a dictionary. `publish_time`
        defaults to the publish timestamp returned by the Pub/Sub server. It
        is bound to each element by Beam at runtime.
        """
        yield {
            "message_body": element.decode("utf-8"),
            "publish_time": datetime.datetime.utcfromtimestamp(
                float(publish_time)
            ).strftime("%Y-%m-%d %H:%M:%S.%f"),
        }
class WriteBatchesToGCS(beam.DoFn):
    def __init__(self, output_path):
        self.output_path = output_path

    def process(self, batch, window=beam.DoFn.WindowParam):
        """Write one batch per file to a Google Cloud Storage bucket."""
        ts_format = "%H:%M"
        window_start = window.start.to_utc_datetime().strftime(ts_format)
        window_end = window.end.to_utc_datetime().strftime(ts_format)
        filename = "-".join([self.output_path, window_start, window_end])

        with beam.io.gcp.gcsio.GcsIO().open(filename=filename, mode="w") as f:
            for element in batch:
                f.write("{}\n".format(json.dumps(element)).encode("utf-8"))
class test_func(beam.DoFn):
    def __init__(self, delimiter=','):
        self.delimiter = delimiter

    def process(self, topic_message):
        print(topic_message)
def run(input_topic, output_path, window_size=1.0, pipeline_args=None):
    # `save_main_session` is set to true because some DoFn's rely on
    # globally imported modules.
    pipeline_options = PipelineOptions(
        pipeline_args, streaming=True, save_main_session=True
    )

    with beam.Pipeline(options=pipeline_options) as pipeline:
        (
            pipeline
            | "Read PubSub Messages"
            >> beam.io.ReadFromPubSub(topic=input_topic)
            | "Pardo" >> beam.ParDo(test_func(','))
        )
if __name__ == "__main__":  # noqa
    input_topic = 'projects/[project-id]/topics/[pub/sub-name]'
    output_path = 'gs://[bucket-name]/[file-directory]'
    # Pass output_path explicitly so that 2 is the window size (in minutes),
    # not the output path.
    run(input_topic, output_path, 2)
# [END pubsub_to_gcs]
As a temporary measure I set return_immediately=True, but that is not a fundamental solution either.
Thank you for reading.
Solution
This seems to be a known issue in the PubSub library, reported in another SO thread, and it looks like it was recently fixed in version 1.4.2; however, that release is not yet included in the Beam dependencies, which currently pin google-cloud-pubsub>=0.39.0,<1.1.0.
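To see which client version Beam actually pulled into your environment, a minimal check (assuming setuptools' pkg_resources is available) is:

import pkg_resources

# Print the installed google-cloud-pubsub version; per the thread above,
# the fix reportedly landed in 1.4.2, while Beam still pins <1.1.0.
print(pkg_resources.get_distribution("google-cloud-pubsub").version)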
I did some research and found that the DataflowRunner seems to handle this error better than the DirectRunner, which is maintained by the Apache Beam team. The issue has been reported on the beam site, but it has not been resolved yet.
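For reference, a minimal sketch of pointing the same pipeline at the DataflowRunner instead; the bracketed values are placeholders, and the region is an assumption you would adjust to your own setup:

from apache_beam.options.pipeline_options import PipelineOptions

# Same streaming options as in the question, but targeting Dataflow
# rather than the local DirectRunner; bracketed values are placeholders.
dataflow_options = PipelineOptions(
    streaming=True,
    save_main_session=True,
    runner='DataflowRunner',
    project='[project-id]',
    region='us-central1',  # assumption: use whichever Dataflow region you run in
    temp_location='gs://[bucket-name]/temp',
)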
Also note that a troubleshooting guide for the DEADLINE_EXCEEDED error can be found here. You can check whether its suggestions help in your case, such as upgrading to the latest version of the client libraries.
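For example, if you use the Pub/Sub client directly (outside of a Beam pipeline, where the version pin above does not apply), upgrading is a single command:

pip install --upgrade google-cloud-pubsub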