From 4ae5afda00746f5137ab022c939e2a6bc8d3e5be Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Wed, 14 Aug 2024 22:58:08 +0000 Subject: [PATCH] docs: changed API title to official name PiperOrigin-RevId: 662676310 Source-Link: https://github.com/googleapis/googleapis/commit/e813a85a1455e44c4c93314d8c2fdf3d77516916 Source-Link: https://github.com/googleapis/googleapis-gen/commit/5b69026c55c42bc69280062dc33e579a666ab81e Copy-Tag: eyJwIjoicGFja2FnZXMvZ29vZ2xlLWNsb3VkLW1hbmFnZWRrYWZrYS8uT3dsQm90LnlhbWwiLCJoIjoiNWI2OTAyNmM1NWM0MmJjNjkyODAwNjJkYzMzZTU3OWE2NjZhYjgxZSJ9 --- .../google-cloud-managedkafka/v1/.coveragerc | 13 + .../google-cloud-managedkafka/v1/.flake8 | 33 + .../google-cloud-managedkafka/v1/MANIFEST.in | 2 + .../google-cloud-managedkafka/v1/README.rst | 49 + .../v1/docs/_static/custom.css | 3 + .../google-cloud-managedkafka/v1/docs/conf.py | 376 + .../v1/docs/index.rst | 7 + .../v1/docs/managedkafka_v1/managed_kafka.rst | 10 + .../v1/docs/managedkafka_v1/services_.rst | 6 + .../v1/docs/managedkafka_v1/types_.rst | 6 + .../v1/google/cloud/managedkafka/__init__.py | 83 + .../cloud/managedkafka/gapic_version.py | 16 + .../v1/google/cloud/managedkafka/py.typed | 2 + .../google/cloud/managedkafka_v1/__init__.py | 84 + .../cloud/managedkafka_v1/gapic_metadata.json | 238 + .../cloud/managedkafka_v1/gapic_version.py | 16 + .../v1/google/cloud/managedkafka_v1/py.typed | 2 + .../managedkafka_v1/services/__init__.py | 15 + .../services/managed_kafka/__init__.py | 22 + .../services/managed_kafka/async_client.py | 2192 +++ .../services/managed_kafka/client.py | 2557 ++++ .../services/managed_kafka/pagers.py | 433 + .../managed_kafka/transports/__init__.py | 38 + .../services/managed_kafka/transports/base.py | 452 + .../services/managed_kafka/transports/grpc.py | 740 + .../managed_kafka/transports/grpc_asyncio.py | 869 ++ .../services/managed_kafka/transports/rest.py | 2264 +++ .../cloud/managedkafka_v1/types/__init__.py | 78 + .../managedkafka_v1/types/managed_kafka.py | 578 + .../cloud/managedkafka_v1/types/resources.py | 438 + .../google-cloud-managedkafka/v1/mypy.ini | 3 + .../google-cloud-managedkafka/v1/noxfile.py | 278 + ...ated_managed_kafka_create_cluster_async.py | 63 + ...rated_managed_kafka_create_cluster_sync.py | 63 + ...erated_managed_kafka_create_topic_async.py | 58 + ...nerated_managed_kafka_create_topic_sync.py | 58 + ...ated_managed_kafka_delete_cluster_async.py | 56 + ...rated_managed_kafka_delete_cluster_sync.py | 56 + ...naged_kafka_delete_consumer_group_async.py | 50 + ...anaged_kafka_delete_consumer_group_sync.py | 50 + ...erated_managed_kafka_delete_topic_async.py | 50 + ...nerated_managed_kafka_delete_topic_sync.py | 50 + ...nerated_managed_kafka_get_cluster_async.py | 52 + ...enerated_managed_kafka_get_cluster_sync.py | 52 + ..._managed_kafka_get_consumer_group_async.py | 52 + ...d_managed_kafka_get_consumer_group_sync.py | 52 + ...generated_managed_kafka_get_topic_async.py | 52 + ..._generated_managed_kafka_get_topic_sync.py | 52 + ...rated_managed_kafka_list_clusters_async.py | 53 + ...erated_managed_kafka_list_clusters_sync.py | 53 + ...anaged_kafka_list_consumer_groups_async.py | 53 + ...managed_kafka_list_consumer_groups_sync.py | 53 + ...nerated_managed_kafka_list_topics_async.py | 53 + ...enerated_managed_kafka_list_topics_sync.py | 53 + ...ated_managed_kafka_update_cluster_async.py | 61 + ...rated_managed_kafka_update_cluster_sync.py | 61 + ...naged_kafka_update_consumer_group_async.py | 51 + ...anaged_kafka_update_consumer_group_sync.py | 51 + 
...erated_managed_kafka_update_topic_async.py | 56 + ...nerated_managed_kafka_update_topic_sync.py | 56 + ...metadata_google.cloud.managedkafka.v1.json | 2313 +++ .../scripts/fixup_managedkafka_v1_keywords.py | 189 + .../google-cloud-managedkafka/v1/setup.py | 93 + .../v1/testing/constraints-3.10.txt | 6 + .../v1/testing/constraints-3.11.txt | 6 + .../v1/testing/constraints-3.12.txt | 6 + .../v1/testing/constraints-3.7.txt | 10 + .../v1/testing/constraints-3.8.txt | 6 + .../v1/testing/constraints-3.9.txt | 6 + .../v1/tests/__init__.py | 16 + .../v1/tests/unit/__init__.py | 16 + .../v1/tests/unit/gapic/__init__.py | 16 + .../unit/gapic/managedkafka_v1/__init__.py | 16 + .../managedkafka_v1/test_managed_kafka.py | 12567 ++++++++++++++++ 74 files changed, 28689 insertions(+) create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/.coveragerc create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/.flake8 create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/MANIFEST.in create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/README.rst create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/docs/_static/custom.css create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/docs/conf.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/docs/index.rst create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/managed_kafka.rst create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/services_.rst create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/types_.rst create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/gapic_version.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/py.typed create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/gapic_metadata.json create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/gapic_version.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/py.typed create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/async_client.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/client.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/pagers.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/base.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/grpc.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/grpc_asyncio.py create mode 
100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/rest.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/managed_kafka.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/resources.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/mypy.ini create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/noxfile.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_cluster_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_cluster_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_topic_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_topic_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_cluster_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_cluster_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_consumer_group_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_consumer_group_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_topic_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_topic_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_cluster_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_cluster_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_consumer_group_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_consumer_group_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_topic_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_topic_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_clusters_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_clusters_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_consumer_groups_async.py create mode 100644 
owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_consumer_groups_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_topics_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_topics_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_cluster_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_cluster_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_consumer_group_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_consumer_group_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_topic_async.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_topic_sync.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/snippet_metadata_google.cloud.managedkafka.v1.json create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/scripts/fixup_managedkafka_v1_keywords.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/setup.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.10.txt create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.11.txt create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.12.txt create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.7.txt create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.8.txt create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.9.txt create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/tests/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/managedkafka_v1/__init__.py create mode 100644 owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/managedkafka_v1/test_managed_kafka.py diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/.coveragerc b/owl-bot-staging/google-cloud-managedkafka/v1/.coveragerc new file mode 100644 index 000000000000..a862c1bb99ef --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/.coveragerc @@ -0,0 +1,13 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/managedkafka/__init__.py + google/cloud/managedkafka/gapic_version.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/.flake8 b/owl-bot-staging/google-cloud-managedkafka/v1/.flake8 new file mode 100644 index 000000000000..29227d4cf419 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/.flake8 @@ -0,0 +1,33 @@ +# -*- coding: 
utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! +[flake8] +ignore = E203, E266, E501, W503 +exclude = + # Exclude generated code. + **/proto/** + **/gapic/** + **/services/** + **/types/** + *_pb2.py + + # Standard linting exemptions. + **/.nox/** + __pycache__, + .git, + *.pyc, + conf.py diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/MANIFEST.in b/owl-bot-staging/google-cloud-managedkafka/v1/MANIFEST.in new file mode 100644 index 000000000000..4384023db8a6 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/managedkafka *.py +recursive-include google/cloud/managedkafka_v1 *.py diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/README.rst b/owl-bot-staging/google-cloud-managedkafka/v1/README.rst new file mode 100644 index 000000000000..bbc75aead39d --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Managedkafka API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Managedkafka API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + source <your-env>/bin/activate + <your-env>/bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. 
code-block:: console + + python3 -m venv <your-env> + <your-env>\Scripts\activate + <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/docs/_static/custom.css b/owl-bot-staging/google-cloud-managedkafka/v1/docs/_static/custom.css new file mode 100644 index 000000000000..06423be0b592 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/docs/_static/custom.css @@ -0,0 +1,3 @@ +dl.field-list > dt { + min-width: 100px +} diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/docs/conf.py b/owl-bot-staging/google-cloud-managedkafka/v1/docs/conf.py new file mode 100644 index 000000000000..6f18390c4187 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-managedkafka documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "4.0.1" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGELOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffixes as a list of strings: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The root toctree document. +root_doc = "index" + +# General information about the project. 
+project = u"google-cloud-managedkafka" +copyright = u"2023, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. 
They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-managedkafka-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). 
+latex_documents = [ + ( + root_doc, + "google-cloud-managedkafka.tex", + u"google-cloud-managedkafka Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + root_doc, + "google-cloud-managedkafka", + u"Google Cloud Managedkafka Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + root_doc, + "google-cloud-managedkafka", + u"google-cloud-managedkafka Documentation", + author, + "google-cloud-managedkafka", + "GAPIC library for Google Cloud Managedkafka API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/docs/index.rst b/owl-bot-staging/google-cloud-managedkafka/v1/docs/index.rst new file mode 100644 index 000000000000..fe4547b719ae --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. 
toctree:: + :maxdepth: 2 + + managedkafka_v1/services + managedkafka_v1/types diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/managed_kafka.rst b/owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/managed_kafka.rst new file mode 100644 index 000000000000..5ed21891a7fc --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/managed_kafka.rst @@ -0,0 +1,10 @@ +ManagedKafka +------------------------------ + +.. automodule:: google.cloud.managedkafka_v1.services.managed_kafka + :members: + :inherited-members: + +.. automodule:: google.cloud.managedkafka_v1.services.managed_kafka.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/services_.rst b/owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/services_.rst new file mode 100644 index 000000000000..3b761cc00c41 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/services_.rst @@ -0,0 +1,6 @@ +Services for Google Cloud Managedkafka v1 API +============================================= +.. toctree:: + :maxdepth: 2 + + managed_kafka diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/types_.rst b/owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/types_.rst new file mode 100644 index 000000000000..4e78607757cc --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/docs/managedkafka_v1/types_.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Managedkafka v1 API +========================================== + +.. automodule:: google.cloud.managedkafka_v1.types + :members: + :show-inheritance: diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/__init__.py new file mode 100644 index 000000000000..02641202aa1c --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/__init__.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from google.cloud.managedkafka import gapic_version as package_version + +__version__ = package_version.__version__ + + +from google.cloud.managedkafka_v1.services.managed_kafka.client import ManagedKafkaClient +from google.cloud.managedkafka_v1.services.managed_kafka.async_client import ManagedKafkaAsyncClient + +from google.cloud.managedkafka_v1.types.managed_kafka import CreateClusterRequest +from google.cloud.managedkafka_v1.types.managed_kafka import CreateTopicRequest +from google.cloud.managedkafka_v1.types.managed_kafka import DeleteClusterRequest +from google.cloud.managedkafka_v1.types.managed_kafka import DeleteConsumerGroupRequest +from google.cloud.managedkafka_v1.types.managed_kafka import DeleteTopicRequest +from google.cloud.managedkafka_v1.types.managed_kafka import GetClusterRequest +from google.cloud.managedkafka_v1.types.managed_kafka import GetConsumerGroupRequest +from google.cloud.managedkafka_v1.types.managed_kafka import GetTopicRequest +from google.cloud.managedkafka_v1.types.managed_kafka import ListClustersRequest +from google.cloud.managedkafka_v1.types.managed_kafka import ListClustersResponse +from google.cloud.managedkafka_v1.types.managed_kafka import ListConsumerGroupsRequest +from google.cloud.managedkafka_v1.types.managed_kafka import ListConsumerGroupsResponse +from google.cloud.managedkafka_v1.types.managed_kafka import ListTopicsRequest +from google.cloud.managedkafka_v1.types.managed_kafka import ListTopicsResponse +from google.cloud.managedkafka_v1.types.managed_kafka import UpdateClusterRequest +from google.cloud.managedkafka_v1.types.managed_kafka import UpdateConsumerGroupRequest +from google.cloud.managedkafka_v1.types.managed_kafka import UpdateTopicRequest +from google.cloud.managedkafka_v1.types.resources import AccessConfig +from google.cloud.managedkafka_v1.types.resources import CapacityConfig +from google.cloud.managedkafka_v1.types.resources import Cluster +from google.cloud.managedkafka_v1.types.resources import ConsumerGroup +from google.cloud.managedkafka_v1.types.resources import ConsumerPartitionMetadata +from google.cloud.managedkafka_v1.types.resources import ConsumerTopicMetadata +from google.cloud.managedkafka_v1.types.resources import GcpConfig +from google.cloud.managedkafka_v1.types.resources import NetworkConfig +from google.cloud.managedkafka_v1.types.resources import OperationMetadata +from google.cloud.managedkafka_v1.types.resources import RebalanceConfig +from google.cloud.managedkafka_v1.types.resources import Topic + +__all__ = ('ManagedKafkaClient', + 'ManagedKafkaAsyncClient', + 'CreateClusterRequest', + 'CreateTopicRequest', + 'DeleteClusterRequest', + 'DeleteConsumerGroupRequest', + 'DeleteTopicRequest', + 'GetClusterRequest', + 'GetConsumerGroupRequest', + 'GetTopicRequest', + 'ListClustersRequest', + 'ListClustersResponse', + 'ListConsumerGroupsRequest', + 'ListConsumerGroupsResponse', + 'ListTopicsRequest', + 'ListTopicsResponse', + 'UpdateClusterRequest', + 'UpdateConsumerGroupRequest', + 'UpdateTopicRequest', + 'AccessConfig', + 'CapacityConfig', + 'Cluster', + 'ConsumerGroup', + 'ConsumerPartitionMetadata', + 'ConsumerTopicMetadata', + 'GcpConfig', + 'NetworkConfig', + 'OperationMetadata', + 'RebalanceConfig', + 'Topic', +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/gapic_version.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/gapic_version.py new file mode 100644 index 000000000000..558c8aab67c5 --- /dev/null +++ 
b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/py.typed b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/py.typed new file mode 100644 index 000000000000..07f293eccc33 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-managedkafka package uses inline types. diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/__init__.py new file mode 100644 index 000000000000..ed5b61526305 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/__init__.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from google.cloud.managedkafka_v1 import gapic_version as package_version + +__version__ = package_version.__version__ + + +from .services.managed_kafka import ManagedKafkaClient +from .services.managed_kafka import ManagedKafkaAsyncClient + +from .types.managed_kafka import CreateClusterRequest +from .types.managed_kafka import CreateTopicRequest +from .types.managed_kafka import DeleteClusterRequest +from .types.managed_kafka import DeleteConsumerGroupRequest +from .types.managed_kafka import DeleteTopicRequest +from .types.managed_kafka import GetClusterRequest +from .types.managed_kafka import GetConsumerGroupRequest +from .types.managed_kafka import GetTopicRequest +from .types.managed_kafka import ListClustersRequest +from .types.managed_kafka import ListClustersResponse +from .types.managed_kafka import ListConsumerGroupsRequest +from .types.managed_kafka import ListConsumerGroupsResponse +from .types.managed_kafka import ListTopicsRequest +from .types.managed_kafka import ListTopicsResponse +from .types.managed_kafka import UpdateClusterRequest +from .types.managed_kafka import UpdateConsumerGroupRequest +from .types.managed_kafka import UpdateTopicRequest +from .types.resources import AccessConfig +from .types.resources import CapacityConfig +from .types.resources import Cluster +from .types.resources import ConsumerGroup +from .types.resources import ConsumerPartitionMetadata +from .types.resources import ConsumerTopicMetadata +from .types.resources import GcpConfig +from .types.resources import NetworkConfig +from .types.resources import OperationMetadata +from .types.resources import RebalanceConfig +from .types.resources import Topic + +__all__ = ( + 'ManagedKafkaAsyncClient', +'AccessConfig', +'CapacityConfig', +'Cluster', +'ConsumerGroup', +'ConsumerPartitionMetadata', +'ConsumerTopicMetadata', +'CreateClusterRequest', +'CreateTopicRequest', +'DeleteClusterRequest', +'DeleteConsumerGroupRequest', +'DeleteTopicRequest', +'GcpConfig', +'GetClusterRequest', +'GetConsumerGroupRequest', +'GetTopicRequest', +'ListClustersRequest', +'ListClustersResponse', +'ListConsumerGroupsRequest', +'ListConsumerGroupsResponse', +'ListTopicsRequest', +'ListTopicsResponse', +'ManagedKafkaClient', +'NetworkConfig', +'OperationMetadata', +'RebalanceConfig', +'Topic', +'UpdateClusterRequest', +'UpdateConsumerGroupRequest', +'UpdateTopicRequest', +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/gapic_metadata.json b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/gapic_metadata.json new file mode 100644 index 000000000000..f17b184ab212 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/gapic_metadata.json @@ -0,0 +1,238 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.managedkafka_v1", + "protoPackage": "google.cloud.managedkafka.v1", + "schema": "1.0", + "services": { + "ManagedKafka": { + "clients": { + "grpc": { + "libraryClient": "ManagedKafkaClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateTopic": { + "methods": [ + "create_topic" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteConsumerGroup": { + "methods": [ + "delete_consumer_group" + ] + }, + "DeleteTopic": { + "methods": [ + "delete_topic" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetConsumerGroup": { + 
"methods": [ + "get_consumer_group" + ] + }, + "GetTopic": { + "methods": [ + "get_topic" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListConsumerGroups": { + "methods": [ + "list_consumer_groups" + ] + }, + "ListTopics": { + "methods": [ + "list_topics" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateConsumerGroup": { + "methods": [ + "update_consumer_group" + ] + }, + "UpdateTopic": { + "methods": [ + "update_topic" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ManagedKafkaAsyncClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateTopic": { + "methods": [ + "create_topic" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteConsumerGroup": { + "methods": [ + "delete_consumer_group" + ] + }, + "DeleteTopic": { + "methods": [ + "delete_topic" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetConsumerGroup": { + "methods": [ + "get_consumer_group" + ] + }, + "GetTopic": { + "methods": [ + "get_topic" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListConsumerGroups": { + "methods": [ + "list_consumer_groups" + ] + }, + "ListTopics": { + "methods": [ + "list_topics" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateConsumerGroup": { + "methods": [ + "update_consumer_group" + ] + }, + "UpdateTopic": { + "methods": [ + "update_topic" + ] + } + } + }, + "rest": { + "libraryClient": "ManagedKafkaClient", + "rpcs": { + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateTopic": { + "methods": [ + "create_topic" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteConsumerGroup": { + "methods": [ + "delete_consumer_group" + ] + }, + "DeleteTopic": { + "methods": [ + "delete_topic" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetConsumerGroup": { + "methods": [ + "get_consumer_group" + ] + }, + "GetTopic": { + "methods": [ + "get_topic" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListConsumerGroups": { + "methods": [ + "list_consumer_groups" + ] + }, + "ListTopics": { + "methods": [ + "list_topics" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateConsumerGroup": { + "methods": [ + "update_consumer_group" + ] + }, + "UpdateTopic": { + "methods": [ + "update_topic" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/gapic_version.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/gapic_version.py new file mode 100644 index 000000000000..558c8aab67c5 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/py.typed b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/py.typed new file mode 100644 index 000000000000..07f293eccc33 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-managedkafka package uses inline types. diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/__init__.py new file mode 100644 index 000000000000..8f6cf068242c --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/__init__.py new file mode 100644 index 000000000000..97aa78d89090 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ManagedKafkaClient +from .async_client import ManagedKafkaAsyncClient + +__all__ = ( + 'ManagedKafkaClient', + 'ManagedKafkaAsyncClient', +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/async_client.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/async_client.py new file mode 100644 index 000000000000..0619d24ab110 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/async_client.py @@ -0,0 +1,2192 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import re +from typing import Dict, Callable, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union + +from google.cloud.managedkafka_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.cloud.managedkafka_v1.services.managed_kafka import pagers +from google.cloud.managedkafka_v1.types import managed_kafka +from google.cloud.managedkafka_v1.types import resources +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ManagedKafkaTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ManagedKafkaGrpcAsyncIOTransport +from .client import ManagedKafkaClient + + +class ManagedKafkaAsyncClient: + """The service that a client application uses to manage Apache + Kafka clusters, topics and consumer groups. + """ + + _client: ManagedKafkaClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
+ DEFAULT_ENDPOINT = ManagedKafkaClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ManagedKafkaClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = ManagedKafkaClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = ManagedKafkaClient._DEFAULT_UNIVERSE + + cluster_path = staticmethod(ManagedKafkaClient.cluster_path) + parse_cluster_path = staticmethod(ManagedKafkaClient.parse_cluster_path) + consumer_group_path = staticmethod(ManagedKafkaClient.consumer_group_path) + parse_consumer_group_path = staticmethod(ManagedKafkaClient.parse_consumer_group_path) + crypto_key_path = staticmethod(ManagedKafkaClient.crypto_key_path) + parse_crypto_key_path = staticmethod(ManagedKafkaClient.parse_crypto_key_path) + topic_path = staticmethod(ManagedKafkaClient.topic_path) + parse_topic_path = staticmethod(ManagedKafkaClient.parse_topic_path) + common_billing_account_path = staticmethod(ManagedKafkaClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(ManagedKafkaClient.parse_common_billing_account_path) + common_folder_path = staticmethod(ManagedKafkaClient.common_folder_path) + parse_common_folder_path = staticmethod(ManagedKafkaClient.parse_common_folder_path) + common_organization_path = staticmethod(ManagedKafkaClient.common_organization_path) + parse_common_organization_path = staticmethod(ManagedKafkaClient.parse_common_organization_path) + common_project_path = staticmethod(ManagedKafkaClient.common_project_path) + parse_common_project_path = staticmethod(ManagedKafkaClient.parse_common_project_path) + common_location_path = staticmethod(ManagedKafkaClient.common_location_path) + parse_common_location_path = staticmethod(ManagedKafkaClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ManagedKafkaAsyncClient: The constructed client. + """ + return ManagedKafkaClient.from_service_account_info.__func__(ManagedKafkaAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ManagedKafkaAsyncClient: The constructed client. + """ + return ManagedKafkaClient.from_service_account_file.__func__(ManagedKafkaAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. 
+ + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return ManagedKafkaClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> ManagedKafkaTransport: + """Returns the transport used by the client instance. + + Returns: + ManagedKafkaTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + + get_transport_class = ManagedKafkaClient.get_transport_class + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, ManagedKafkaTransport, Callable[..., ManagedKafkaTransport]]] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the managed kafka async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,ManagedKafkaTransport,Callable[..., ManagedKafkaTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the ManagedKafkaTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which has one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
+                is "true", then the ``client_cert_source`` property can be used
+                to provide a client certificate for mTLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+                3. The ``universe_domain`` property can be used to override the
+                default "googleapis.com" universe. Note that the ``api_endpoint``
+                property still takes precedence; and ``universe_domain`` is
+                currently not supported for mTLS.
+
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation fails for any reason.
+        """
+        self._client = ManagedKafkaClient(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+        )
+
+    async def list_clusters(self,
+            request: Optional[Union[managed_kafka.ListClustersRequest, dict]] = None,
+            *,
+            parent: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListClustersAsyncPager:
+        r"""Lists the clusters in a given project and location.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import managedkafka_v1
+
+            async def sample_list_clusters():
+                # Create a client
+                client = managedkafka_v1.ManagedKafkaAsyncClient()
+
+                # Initialize request argument(s)
+                request = managedkafka_v1.ListClustersRequest(
+                    parent="parent_value",
+                )
+
+                # Make the request
+                page_result = client.list_clusters(request=request)
+
+                # Handle the response
+                async for response in page_result:
+                    print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.managedkafka_v1.types.ListClustersRequest, dict]]):
+                The request object. Request for ListClusters.
+            parent (:class:`str`):
+                Required. The parent location whose clusters are to be
+                listed. Structured like
+                ``projects/{project}/locations/{location}``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListClustersAsyncPager:
+                Response for ListClusters.
+
+                Iterating over this object will yield
+                results and resolve additional pages
+                automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
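+        # For example, `await client.list_clusters(request=request, parent="...")`
+        # fails the check below with a ValueError: pass the value either inside
+        # the request object or as the flattened keyword argument, never both.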
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.ListClustersRequest): + request = managed_kafka.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.list_clusters] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListClustersAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_cluster(self, + request: Optional[Union[managed_kafka.GetClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.Cluster: + r"""Returns the properties of a single cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_get_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = await client.get_cluster(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.GetClusterRequest, dict]]): + The request object. Request for GetCluster. + name (:class:`str`): + Required. The name of the cluster + whose configuration to return. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.Cluster: + An Apache Kafka cluster deployed in a + location. + + """ + # Create or coerce a protobuf request object. 
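+        # A plain dict is also accepted here and is coerced into a
+        # GetClusterRequest by the constructor call below, e.g.
+        # `await client.get_cluster(request={"name": "projects/my-project/locations/us-central1/clusters/my-cluster"})`
+        # (the resource name shown is a placeholder).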
+ # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.GetClusterRequest): + request = managed_kafka.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.get_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_cluster(self, + request: Optional[Union[managed_kafka.CreateClusterRequest, dict]] = None, + *, + parent: Optional[str] = None, + cluster: Optional[resources.Cluster] = None, + cluster_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new cluster in a given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_create_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + cluster = managedkafka_v1.Cluster() + cluster.gcp_config.access_config.network_configs.subnet = "subnet_value" + cluster.capacity_config.vcpu_count = 1094 + cluster.capacity_config.memory_bytes = 1311 + + request = managedkafka_v1.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=cluster, + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.CreateClusterRequest, dict]]): + The request object. Request for CreateCluster. + parent (:class:`str`): + Required. The parent region in which to create the + cluster. Structured like + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.cloud.managedkafka_v1.types.Cluster`): + Required. 
Configuration of the cluster to create. Its + ``name`` field is ignored. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The ID to use for the cluster, which will + become the final component of the cluster's name. The ID + must be 1-63 characters long, and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` to comply with + RFC 1035. + + This value is structured like: ``my-cluster-id``. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.managedkafka_v1.types.Cluster` An + Apache Kafka cluster deployed in a location. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, cluster, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.CreateClusterRequest): + request = managed_kafka.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if cluster is not None: + request.cluster = cluster + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.create_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + resources.Cluster, + metadata_type=resources.OperationMetadata, + ) + + # Done; return the response. + return response + + async def update_cluster(self, + request: Optional[Union[managed_kafka.UpdateClusterRequest, dict]] = None, + *, + cluster: Optional[resources.Cluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates the properties of a single cluster. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_update_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + cluster = managedkafka_v1.Cluster() + cluster.gcp_config.access_config.network_configs.subnet = "subnet_value" + cluster.capacity_config.vcpu_count = 1094 + cluster.capacity_config.memory_bytes = 1311 + + request = managedkafka_v1.UpdateClusterRequest( + cluster=cluster, + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.UpdateClusterRequest, dict]]): + The request object. Request for UpdateCluster. + cluster (:class:`google.cloud.managedkafka_v1.types.Cluster`): + Required. The cluster to update. Its ``name`` field must + be populated. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the cluster resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. The mask is required + and a value of \* will update all fields. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.managedkafka_v1.types.Cluster` An + Apache Kafka cluster deployed in a location. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.UpdateClusterRequest): + request = managed_kafka.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
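+        # The wrapped method carries the default retry and timeout policies
+        # from the service config; explicit `retry=` and `timeout=` arguments
+        # passed to this call override those defaults for a single request.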
+ rpc = self._client._transport._wrapped_methods[self._client._transport.update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("cluster.name", request.cluster.name), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + resources.Cluster, + metadata_type=resources.OperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_cluster(self, + request: Optional[Union[managed_kafka.DeleteClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_delete_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.DeleteClusterRequest, dict]]): + The request object. Request for DeleteCluster. + name (:class:`str`): + Required. The name of the cluster to + delete. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.DeleteClusterRequest): + request = managed_kafka.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.delete_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=resources.OperationMetadata, + ) + + # Done; return the response. + return response + + async def list_topics(self, + request: Optional[Union[managed_kafka.ListTopicsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTopicsAsyncPager: + r"""Lists the topics in a given cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_list_topics(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.ListTopicsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_topics(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.ListTopicsRequest, dict]]): + The request object. Request for ListTopics. + parent (:class:`str`): + Required. The parent cluster whose topics are to be + listed. Structured like + ``projects/{project}/locations/{location}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListTopicsAsyncPager: + Response for ListTopics. 
+ + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.ListTopicsRequest): + request = managed_kafka.ListTopicsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.list_topics] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTopicsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_topic(self, + request: Optional[Union[managed_kafka.GetTopicRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.Topic: + r"""Returns the properties of a single topic. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_get_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetTopicRequest( + name="name_value", + ) + + # Make the request + response = await client.get_topic(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.GetTopicRequest, dict]]): + The request object. Request for GetTopic. + name (:class:`str`): + Required. The name of the topic whose + configuration to return. Structured + like: + + projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.Topic: + A Kafka topic in a given cluster. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.GetTopicRequest): + request = managed_kafka.GetTopicRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.get_topic] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_topic(self, + request: Optional[Union[managed_kafka.CreateTopicRequest, dict]] = None, + *, + parent: Optional[str] = None, + topic: Optional[resources.Topic] = None, + topic_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.Topic: + r"""Creates a new topic in a given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_create_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + topic = managedkafka_v1.Topic() + topic.partition_count = 1634 + topic.replication_factor = 1912 + + request = managedkafka_v1.CreateTopicRequest( + parent="parent_value", + topic_id="topic_id_value", + topic=topic, + ) + + # Make the request + response = await client.create_topic(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.CreateTopicRequest, dict]]): + The request object. Request for CreateTopic. + parent (:class:`str`): + Required. The parent cluster in which to create the + topic. Structured like + ``projects/{project}/locations/{location}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ topic (:class:`google.cloud.managedkafka_v1.types.Topic`): + Required. Configuration of the topic to create. Its + ``name`` field is ignored. + + This corresponds to the ``topic`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + topic_id (:class:`str`): + Required. The ID to use for the topic, which will become + the final component of the topic's name. + + This value is structured like: ``my-topic-name``. + + This corresponds to the ``topic_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.Topic: + A Kafka topic in a given cluster. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, topic, topic_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.CreateTopicRequest): + request = managed_kafka.CreateTopicRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if topic is not None: + request.topic = topic + if topic_id is not None: + request.topic_id = topic_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.create_topic] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_topic(self, + request: Optional[Union[managed_kafka.UpdateTopicRequest, dict]] = None, + *, + topic: Optional[resources.Topic] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.Topic: + r"""Updates the properties of a single topic. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_update_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + topic = managedkafka_v1.Topic() + topic.partition_count = 1634 + topic.replication_factor = 1912 + + request = managedkafka_v1.UpdateTopicRequest( + topic=topic, + ) + + # Make the request + response = await client.update_topic(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.UpdateTopicRequest, dict]]): + The request object. Request for UpdateTopic. + topic (:class:`google.cloud.managedkafka_v1.types.Topic`): + Required. The topic to update. Its ``name`` field must + be populated. + + This corresponds to the ``topic`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the Topic resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. The mask is required + and a value of \* will update all fields. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.Topic: + A Kafka topic in a given cluster. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([topic, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.UpdateTopicRequest): + request = managed_kafka.UpdateTopicRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if topic is not None: + request.topic = topic + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.update_topic] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("topic.name", request.topic.name), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_topic(self, + request: Optional[Union[managed_kafka.DeleteTopicRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a single topic. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_delete_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteTopicRequest( + name="name_value", + ) + + # Make the request + await client.delete_topic(request=request) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.DeleteTopicRequest, dict]]): + The request object. Request for DeleteTopic. + name (:class:`str`): + Required. The name of the topic to delete. + ``projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.DeleteTopicRequest): + request = managed_kafka.DeleteTopicRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.delete_topic] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
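+        # DeleteTopic returns google.protobuf.Empty, so there is no response to
+        # hand back; the await below only surfaces completion or errors.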
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_consumer_groups(self, + request: Optional[Union[managed_kafka.ListConsumerGroupsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListConsumerGroupsAsyncPager: + r"""Lists the consumer groups in a given cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_list_consumer_groups(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.ListConsumerGroupsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_consumer_groups(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.ListConsumerGroupsRequest, dict]]): + The request object. Request for ListConsumerGroups. + parent (:class:`str`): + Required. The parent cluster whose consumer groups are + to be listed. Structured like + ``projects/{project}/locations/{location}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListConsumerGroupsAsyncPager: + Response for ListConsumerGroups. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.ListConsumerGroupsRequest): + request = managed_kafka.ListConsumerGroupsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.list_consumer_groups] + + # Certain fields should be provided within the metadata header; + # add these here. 
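+        # On the wire this becomes an `x-goog-request-params` header, e.g.
+        # `x-goog-request-params: parent=projects/my-project/locations/us-central1/clusters/my-cluster`
+        # (placeholder values), which the service uses to route the request.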
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListConsumerGroupsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_consumer_group(self, + request: Optional[Union[managed_kafka.GetConsumerGroupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.ConsumerGroup: + r"""Returns the properties of a single consumer group. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_get_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetConsumerGroupRequest( + name="name_value", + ) + + # Make the request + response = await client.get_consumer_group(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.GetConsumerGroupRequest, dict]]): + The request object. Request for GetConsumerGroup. + name (:class:`str`): + Required. The name of the consumer group whose + configuration to return. + ``projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.ConsumerGroup: + A Kafka consumer group in a given + cluster. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.GetConsumerGroupRequest): + request = managed_kafka.GetConsumerGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.get_consumer_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_consumer_group(self, + request: Optional[Union[managed_kafka.UpdateConsumerGroupRequest, dict]] = None, + *, + consumer_group: Optional[resources.ConsumerGroup] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.ConsumerGroup: + r"""Updates the properties of a single consumer group. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_update_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.UpdateConsumerGroupRequest( + ) + + # Make the request + response = await client.update_consumer_group(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.UpdateConsumerGroupRequest, dict]]): + The request object. Request for UpdateConsumerGroup. + consumer_group (:class:`google.cloud.managedkafka_v1.types.ConsumerGroup`): + Required. The consumer group to update. Its ``name`` + field must be populated. + + This corresponds to the ``consumer_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the ConsumerGroup resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. The mask is required + and a value of \* will update all fields. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.ConsumerGroup: + A Kafka consumer group in a given + cluster. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
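+        # Supplying only one of the flattened arguments is allowed; the check
+        # below only forbids combining `request` with `consumer_group` or
+        # `update_mask`.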
+ has_flattened_params = any([consumer_group, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.UpdateConsumerGroupRequest): + request = managed_kafka.UpdateConsumerGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if consumer_group is not None: + request.consumer_group = consumer_group + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.update_consumer_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("consumer_group.name", request.consumer_group.name), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_consumer_group(self, + request: Optional[Union[managed_kafka.DeleteConsumerGroupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a single consumer group. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + async def sample_delete_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteConsumerGroupRequest( + name="name_value", + ) + + # Make the request + await client.delete_consumer_group(request=request) + + Args: + request (Optional[Union[google.cloud.managedkafka_v1.types.DeleteConsumerGroupRequest, dict]]): + The request object. Request for DeleteConsumerGroup. + name (:class:`str`): + Required. The name of the consumer group to delete. + ``projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
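+        # Omitting both `request` and `name` is not caught here; it yields an
+        # empty DeleteConsumerGroupRequest, which the service should reject
+        # because `name` is a required field.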
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.DeleteConsumerGroupRequest): + request = managed_kafka.DeleteConsumerGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.delete_consumer_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. 
If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def __aenter__(self) -> "ManagedKafkaAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "ManagedKafkaAsyncClient", +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/client.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/client.py new file mode 100644 index 000000000000..68901b92deab --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/client.py @@ -0,0 +1,2557 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Callable, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast +import warnings + +from google.cloud.managedkafka_v1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.cloud.managedkafka_v1.services.managed_kafka import pagers +from google.cloud.managedkafka_v1.types import managed_kafka +from google.cloud.managedkafka_v1.types import resources +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ManagedKafkaTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ManagedKafkaGrpcTransport +from .transports.grpc_asyncio import ManagedKafkaGrpcAsyncIOTransport +from .transports.rest import ManagedKafkaRestTransport + + +class ManagedKafkaClientMeta(type): + """Metaclass for the ManagedKafka client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ManagedKafkaTransport]] + _transport_registry["grpc"] = ManagedKafkaGrpcTransport + _transport_registry["grpc_asyncio"] = ManagedKafkaGrpcAsyncIOTransport + _transport_registry["rest"] = ManagedKafkaRestTransport + + def get_transport_class(cls, + label: Optional[str] = None, + ) -> Type[ManagedKafkaTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ManagedKafkaClient(metaclass=ManagedKafkaClientMeta): + """The service that a client application uses to manage Apache + Kafka clusters, topics and consumer groups. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. 
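+
+        For example, "managedkafka.sandbox.googleapis.com" becomes
+        "managedkafka.mtls.sandbox.googleapis.com" (illustrative; derived from
+        the replacement logic below).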
+ """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = "managedkafka.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + _DEFAULT_ENDPOINT_TEMPLATE = "managedkafka.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ManagedKafkaClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ManagedKafkaClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ManagedKafkaTransport: + """Returns the transport used by the client instance. + + Returns: + ManagedKafkaTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def cluster_path(project: str,location: str,cluster: str,) -> str: + """Returns a fully-qualified cluster string.""" + return "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str,str]: + """Parses a cluster path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def consumer_group_path(project: str,location: str,cluster: str,consumer_group: str,) -> str: + """Returns a fully-qualified consumer_group string.""" + return "projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group}".format(project=project, location=location, cluster=cluster, consumer_group=consumer_group, ) + + @staticmethod + def parse_consumer_group_path(path: str) -> Dict[str,str]: + """Parses a consumer_group path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)/consumerGroups/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def crypto_key_path(project: str,location: str,key_ring: str,crypto_key: str,) -> str: + """Returns a fully-qualified crypto_key string.""" + return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, ) + + @staticmethod + def parse_crypto_key_path(path: str) -> Dict[str,str]: + """Parses a crypto_key path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def topic_path(project: str,location: str,cluster: str,topic: str,) -> str: + """Returns a fully-qualified topic string.""" + return "projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}".format(project=project, location=location, cluster=cluster, topic=topic, ) + + @staticmethod + def parse_topic_path(path: str) -> Dict[str,str]: + """Parses a topic path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)/topics/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its 
component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Deprecated. Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn("get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false").lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint(api_override, client_cert_source, universe_domain, use_mtls_endpoint): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + _default_universe = ManagedKafkaClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError(f"mTLS is not supported in any universe other than {_default_universe}.") + api_endpoint = ManagedKafkaClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = ManagedKafkaClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=universe_domain) + return api_endpoint + + @staticmethod + def _get_universe_domain(client_universe_domain: Optional[str], universe_domain_env: Optional[str]) -> str: + """Return the universe domain used by the client. 
+ + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = ManagedKafkaClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes(client_universe: str, + credentials: ga_credentials.Credentials) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = ManagedKafkaClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError("The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default.") + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = (self._is_universe_domain_valid or + ManagedKafkaClient._compare_universes(self.universe_domain, self.transport._credentials)) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, ManagedKafkaTransport, Callable[..., ManagedKafkaTransport]]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the managed kafka client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,ManagedKafkaTransport,Callable[..., ManagedKafkaTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. 
+                If a Callable is given, it will be called with the same set of initialization
+                arguments as used in the ManagedKafkaTransport constructor.
+                If set to None, a transport is chosen automatically.
+            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+                Custom options for the client.
+
+                1. The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client when ``transport`` is
+                not explicitly provided. If this property is not set and
+                ``transport`` was not explicitly provided, the endpoint is
+                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+                variable, which can have one of the following values:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto-switch to the
+                default mTLS endpoint if client certificate is present; this is
+                the default value).
+
+                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide a client certificate for mTLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+                3. The ``universe_domain`` property can be used to override the
+                default "googleapis.com" universe. Note that the ``api_endpoint``
+                property still takes precedence; and ``universe_domain`` is
+                currently not supported for mTLS.
+
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        self._client_options = client_options
+        if isinstance(self._client_options, dict):
+            self._client_options = client_options_lib.from_dict(self._client_options)
+        if self._client_options is None:
+            self._client_options = client_options_lib.ClientOptions()
+        self._client_options = cast(client_options_lib.ClientOptions, self._client_options)
+
+        universe_domain_opt = getattr(self._client_options, 'universe_domain', None)
+
+        self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ManagedKafkaClient._read_environment_variables()
+        self._client_cert_source = ManagedKafkaClient._get_client_cert_source(self._client_options.client_cert_source, self._use_client_cert)
+        self._universe_domain = ManagedKafkaClient._get_universe_domain(universe_domain_opt, self._universe_domain_env)
+        self._api_endpoint = None # updated below, depending on `transport`
+
+        # Initialize the universe domain validation.
+        self._is_universe_domain_valid = False
+
+        api_key_value = getattr(self._client_options, "api_key", None)
+        if api_key_value and credentials:
+            raise ValueError("client_options.api_key and credentials are mutually exclusive")
+
+        # Save or instantiate the transport.
+        # Ordinarily, we provide the transport, but allowing a custom transport
+        # instance provides an extensibility point for unusual situations.
+        transport_provided = isinstance(transport, ManagedKafkaTransport)
+        if transport_provided:
+            # transport is a ManagedKafkaTransport instance.
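+            # (Illustrative) e.g. a caller-constructed ManagedKafkaGrpcTransport
+            # passed via `transport=`. Its credentials and scopes must be set on
+            # the transport itself, which is why the checks below reject separate
+            # credential arguments.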
+ if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(ManagedKafkaTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = (self._api_endpoint or + ManagedKafkaClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint)) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + transport_init: Union[Type[ManagedKafkaTransport], Callable[..., ManagedKafkaTransport]] = ( + ManagedKafkaClient.get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., ManagedKafkaTransport], transport) + ) + # initialize with the provided callable or the passed in class + self._transport = transport_init( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def list_clusters(self, + request: Optional[Union[managed_kafka.ListClustersRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListClustersPager: + r"""Lists the clusters in a given project and location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_list_clusters(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_clusters(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.ListClustersRequest, dict]): + The request object. Request for ListClusters. + parent (str): + Required. The parent location whose clusters are to be + listed. Structured like + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListClustersPager: + Response for ListClusters. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.ListClustersRequest): + request = managed_kafka.ListClustersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListClustersPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_cluster(self, + request: Optional[Union[managed_kafka.GetClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.Cluster: + r"""Returns the properties of a single cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_get_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = client.get_cluster(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.GetClusterRequest, dict]): + The request object. Request for GetCluster. + name (str): + Required. The name of the cluster + whose configuration to return. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.Cluster: + An Apache Kafka cluster deployed in a + location. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.GetClusterRequest): + request = managed_kafka.GetClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_cluster(self, + request: Optional[Union[managed_kafka.CreateClusterRequest, dict]] = None, + *, + parent: Optional[str] = None, + cluster: Optional[resources.Cluster] = None, + cluster_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a new cluster in a given project and + location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_create_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + cluster = managedkafka_v1.Cluster() + cluster.gcp_config.access_config.network_configs.subnet = "subnet_value" + cluster.capacity_config.vcpu_count = 1094 + cluster.capacity_config.memory_bytes = 1311 + + request = managedkafka_v1.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=cluster, + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.CreateClusterRequest, dict]): + The request object. Request for CreateCluster. + parent (str): + Required. The parent region in which to create the + cluster. Structured like + ``projects/{project}/locations/{location}``. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.cloud.managedkafka_v1.types.Cluster): + Required. Configuration of the cluster to create. Its + ``name`` field is ignored. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. The ID to use for the cluster, which will + become the final component of the cluster's name. The ID + must be 1-63 characters long, and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` to comply with + RFC 1035. + + This value is structured like: ``my-cluster-id``. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.managedkafka_v1.types.Cluster` An + Apache Kafka cluster deployed in a location. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, cluster, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.CreateClusterRequest): + request = managed_kafka.CreateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if cluster is not None: + request.cluster = cluster + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + resources.Cluster, + metadata_type=resources.OperationMetadata, + ) + + # Done; return the response. + return response + + def update_cluster(self, + request: Optional[Union[managed_kafka.UpdateClusterRequest, dict]] = None, + *, + cluster: Optional[resources.Cluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates the properties of a single cluster. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_update_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + cluster = managedkafka_v1.Cluster() + cluster.gcp_config.access_config.network_configs.subnet = "subnet_value" + cluster.capacity_config.vcpu_count = 1094 + cluster.capacity_config.memory_bytes = 1311 + + request = managedkafka_v1.UpdateClusterRequest( + cluster=cluster, + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.UpdateClusterRequest, dict]): + The request object. Request for UpdateCluster. + cluster (google.cloud.managedkafka_v1.types.Cluster): + Required. The cluster to update. Its ``name`` field must + be populated. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the cluster resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. The mask is required + and a value of \* will update all fields. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.managedkafka_v1.types.Cluster` An + Apache Kafka cluster deployed in a location. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.UpdateClusterRequest): + request = managed_kafka.UpdateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
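+        # (Descriptive note) _wrapped_methods maps each transport method to a
+        # wrapper that applies that RPC's default retry and timeout settings,
+        # configured when the transport was created.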
+ rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("cluster.name", request.cluster.name), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + resources.Cluster, + metadata_type=resources.OperationMetadata, + ) + + # Done; return the response. + return response + + def delete_cluster(self, + request: Optional[Union[managed_kafka.DeleteClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a single cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_delete_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.DeleteClusterRequest, dict]): + The request object. Request for DeleteCluster. + name (str): + Required. The name of the cluster to + delete. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.DeleteClusterRequest): + request = managed_kafka.DeleteClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=resources.OperationMetadata, + ) + + # Done; return the response. + return response + + def list_topics(self, + request: Optional[Union[managed_kafka.ListTopicsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTopicsPager: + r"""Lists the topics in a given cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_list_topics(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.ListTopicsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_topics(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.ListTopicsRequest, dict]): + The request object. Request for ListTopics. + parent (str): + Required. The parent cluster whose topics are to be + listed. Structured like + ``projects/{project}/locations/{location}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListTopicsPager: + Response for ListTopics. + + Iterating over this object will yield + results and resolve additional pages + automatically. 
+ + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.ListTopicsRequest): + request = managed_kafka.ListTopicsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_topics] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTopicsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_topic(self, + request: Optional[Union[managed_kafka.GetTopicRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.Topic: + r"""Returns the properties of a single topic. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_get_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetTopicRequest( + name="name_value", + ) + + # Make the request + response = client.get_topic(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.GetTopicRequest, dict]): + The request object. Request for GetTopic. + name (str): + Required. The name of the topic whose + configuration to return. Structured + like: + + projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+
+        Returns:
+            google.cloud.managedkafka_v1.types.Topic:
+                A Kafka topic in a given cluster.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, managed_kafka.GetTopicRequest):
+            request = managed_kafka.GetTopicRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if name is not None:
+                request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_topic]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def create_topic(self,
+            request: Optional[Union[managed_kafka.CreateTopicRequest, dict]] = None,
+            *,
+            parent: Optional[str] = None,
+            topic: Optional[resources.Topic] = None,
+            topic_id: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> resources.Topic:
+        r"""Creates a new topic in a given cluster.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import managedkafka_v1
+
+            def sample_create_topic():
+                # Create a client
+                client = managedkafka_v1.ManagedKafkaClient()
+
+                # Initialize request argument(s)
+                topic = managedkafka_v1.Topic()
+                topic.partition_count = 1634
+                topic.replication_factor = 1912
+
+                request = managedkafka_v1.CreateTopicRequest(
+                    parent="parent_value",
+                    topic_id="topic_id_value",
+                    topic=topic,
+                )
+
+                # Make the request
+                response = client.create_topic(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.managedkafka_v1.types.CreateTopicRequest, dict]):
+                The request object. Request for CreateTopic.
+            parent (str):
+                Required. The parent cluster in which to create the
+                topic. Structured like
+                ``projects/{project}/locations/{location}/clusters/{cluster}``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            topic (google.cloud.managedkafka_v1.types.Topic):
+                Required. Configuration of the topic to create. Its
+                ``name`` field is ignored.
+ + This corresponds to the ``topic`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + topic_id (str): + Required. The ID to use for the topic, which will become + the final component of the topic's name. + + This value is structured like: ``my-topic-name``. + + This corresponds to the ``topic_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.Topic: + A Kafka topic in a given cluster. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, topic, topic_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.CreateTopicRequest): + request = managed_kafka.CreateTopicRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if topic is not None: + request.topic = topic + if topic_id is not None: + request.topic_id = topic_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_topic] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_topic(self, + request: Optional[Union[managed_kafka.UpdateTopicRequest, dict]] = None, + *, + topic: Optional[resources.Topic] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.Topic: + r"""Updates the properties of a single topic. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_update_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + topic = managedkafka_v1.Topic() + topic.partition_count = 1634 + topic.replication_factor = 1912 + + request = managedkafka_v1.UpdateTopicRequest( + topic=topic, + ) + + # Make the request + response = client.update_topic(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.UpdateTopicRequest, dict]): + The request object. Request for UpdateTopic. + topic (google.cloud.managedkafka_v1.types.Topic): + Required. The topic to update. Its ``name`` field must + be populated. + + This corresponds to the ``topic`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the Topic resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. The mask is required + and a value of \* will update all fields. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.Topic: + A Kafka topic in a given cluster. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([topic, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.UpdateTopicRequest): + request = managed_kafka.UpdateTopicRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if topic is not None: + request.topic = topic + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_topic] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("topic.name", request.topic.name), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
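+        # Unlike the cluster mutations above, UpdateTopic is a unary call:
+        # the updated Topic proto is returned directly rather than wrapped
+        # in an operation future.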
+ return response + + def delete_topic(self, + request: Optional[Union[managed_kafka.DeleteTopicRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a single topic. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_delete_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteTopicRequest( + name="name_value", + ) + + # Make the request + client.delete_topic(request=request) + + Args: + request (Union[google.cloud.managedkafka_v1.types.DeleteTopicRequest, dict]): + The request object. Request for DeleteTopic. + name (str): + Required. The name of the topic to delete. + ``projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.DeleteTopicRequest): + request = managed_kafka.DeleteTopicRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_topic] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def list_consumer_groups(self, + request: Optional[Union[managed_kafka.ListConsumerGroupsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListConsumerGroupsPager: + r"""Lists the consumer groups in a given cluster. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_list_consumer_groups(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.ListConsumerGroupsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_consumer_groups(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.ListConsumerGroupsRequest, dict]): + The request object. Request for ListConsumerGroups. + parent (str): + Required. The parent cluster whose consumer groups are + to be listed. Structured like + ``projects/{project}/locations/{location}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListConsumerGroupsPager: + Response for ListConsumerGroups. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.ListConsumerGroupsRequest): + request = managed_kafka.ListConsumerGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_consumer_groups] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListConsumerGroupsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
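+        # The pager re-invokes the wrapped RPC with the same retry, timeout,
+        # and metadata whenever iteration advances past the current page.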
+ return response + + def get_consumer_group(self, + request: Optional[Union[managed_kafka.GetConsumerGroupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.ConsumerGroup: + r"""Returns the properties of a single consumer group. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_get_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetConsumerGroupRequest( + name="name_value", + ) + + # Make the request + response = client.get_consumer_group(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.GetConsumerGroupRequest, dict]): + The request object. Request for GetConsumerGroup. + name (str): + Required. The name of the consumer group whose + configuration to return. + ``projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.ConsumerGroup: + A Kafka consumer group in a given + cluster. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.GetConsumerGroupRequest): + request = managed_kafka.GetConsumerGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_consumer_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
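+        # GetConsumerGroup is a plain unary call, so the ConsumerGroup proto
+        # is returned as-is, with no pager or operation wrapper.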
+ return response + + def update_consumer_group(self, + request: Optional[Union[managed_kafka.UpdateConsumerGroupRequest, dict]] = None, + *, + consumer_group: Optional[resources.ConsumerGroup] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> resources.ConsumerGroup: + r"""Updates the properties of a single consumer group. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_update_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.UpdateConsumerGroupRequest( + ) + + # Make the request + response = client.update_consumer_group(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.managedkafka_v1.types.UpdateConsumerGroupRequest, dict]): + The request object. Request for UpdateConsumerGroup. + consumer_group (google.cloud.managedkafka_v1.types.ConsumerGroup): + Required. The consumer group to update. Its ``name`` + field must be populated. + + This corresponds to the ``consumer_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the ConsumerGroup resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. The mask is required + and a value of \* will update all fields. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.managedkafka_v1.types.ConsumerGroup: + A Kafka consumer group in a given + cluster. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([consumer_group, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.UpdateConsumerGroupRequest): + request = managed_kafka.UpdateConsumerGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
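+        # For example, a flattened call sketch (hypothetical values; the mask
+        # paths must name real ConsumerGroup fields):
+        #   client.update_consumer_group(
+        #       consumer_group=consumer_group,
+        #       update_mask=field_mask_pb2.FieldMask(paths=["topics"]),
+        #   )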
+ if consumer_group is not None: + request.consumer_group = consumer_group + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_consumer_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("consumer_group.name", request.consumer_group.name), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_consumer_group(self, + request: Optional[Union[managed_kafka.DeleteConsumerGroupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a single consumer group. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import managedkafka_v1 + + def sample_delete_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteConsumerGroupRequest( + name="name_value", + ) + + # Make the request + client.delete_consumer_group(request=request) + + Args: + request (Union[google.cloud.managedkafka_v1.types.DeleteConsumerGroupRequest, dict]): + The request object. Request for DeleteConsumerGroup. + name (str): + Required. The name of the consumer group to delete. + ``projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, managed_kafka.DeleteConsumerGroupRequest): + request = managed_kafka.DeleteConsumerGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
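+        # When the ``DEFAULT`` sentinels above are left in place, the wrapped
+        # method falls back to the retry and timeout policies configured on
+        # the transport.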
+ rpc = self._transport._wrapped_methods[self._transport.delete_consumer_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def __enter__(self) -> "ManagedKafkaClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
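+        # A plain dict such as {"name": "projects/my-project"} (illustrative
+        # value only) is accepted and expanded into the proto request below.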
+ if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "ManagedKafkaClient", +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/pagers.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/pagers.py new file mode 100644 index 000000000000..d005d3dc13ff --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/pagers.py @@ -0,0 +1,433 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator, Union +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + +from google.cloud.managedkafka_v1.types import managed_kafka +from google.cloud.managedkafka_v1.types import resources + + +class ListClustersPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`google.cloud.managedkafka_v1.types.ListClustersResponse` object, and + provides an ``__iter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.managedkafka_v1.types.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
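+
+    Pages are fetched lazily: no additional ``ListClusters`` request is
+    sent until iteration advances past the current page.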
+ """ + def __init__(self, + method: Callable[..., managed_kafka.ListClustersResponse], + request: managed_kafka.ListClustersRequest, + response: managed_kafka.ListClustersResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.managedkafka_v1.types.ListClustersRequest): + The initial request object. + response (google.cloud.managedkafka_v1.types.ListClustersResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = managed_kafka.ListClustersRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[managed_kafka.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, retry=self._retry, timeout=self._timeout, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[resources.Cluster]: + for page in self.pages: + yield from page.clusters + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListClustersAsyncPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`google.cloud.managedkafka_v1.types.ListClustersResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`google.cloud.managedkafka_v1.types.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[managed_kafka.ListClustersResponse]], + request: managed_kafka.ListClustersRequest, + response: managed_kafka.ListClustersResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.managedkafka_v1.types.ListClustersRequest): + The initial request object. + response (google.cloud.managedkafka_v1.types.ListClustersResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = managed_kafka.ListClustersRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[managed_kafka.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, retry=self._retry, timeout=self._timeout, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[resources.Cluster]: + async def async_generator(): + async for page in self.pages: + for response in page.clusters: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTopicsPager: + """A pager for iterating through ``list_topics`` requests. + + This class thinly wraps an initial + :class:`google.cloud.managedkafka_v1.types.ListTopicsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``topics`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTopics`` requests and continue to iterate + through the ``topics`` field on the + corresponding responses. + + All the usual :class:`google.cloud.managedkafka_v1.types.ListTopicsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., managed_kafka.ListTopicsResponse], + request: managed_kafka.ListTopicsRequest, + response: managed_kafka.ListTopicsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.managedkafka_v1.types.ListTopicsRequest): + The initial request object. + response (google.cloud.managedkafka_v1.types.ListTopicsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = managed_kafka.ListTopicsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[managed_kafka.ListTopicsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, retry=self._retry, timeout=self._timeout, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[resources.Topic]: + for page in self.pages: + yield from page.topics + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTopicsAsyncPager: + """A pager for iterating through ``list_topics`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.managedkafka_v1.types.ListTopicsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``topics`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTopics`` requests and continue to iterate + through the ``topics`` field on the + corresponding responses. + + All the usual :class:`google.cloud.managedkafka_v1.types.ListTopicsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[managed_kafka.ListTopicsResponse]], + request: managed_kafka.ListTopicsRequest, + response: managed_kafka.ListTopicsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.managedkafka_v1.types.ListTopicsRequest): + The initial request object. + response (google.cloud.managedkafka_v1.types.ListTopicsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = managed_kafka.ListTopicsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[managed_kafka.ListTopicsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, retry=self._retry, timeout=self._timeout, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[resources.Topic]: + async def async_generator(): + async for page in self.pages: + for response in page.topics: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListConsumerGroupsPager: + """A pager for iterating through ``list_consumer_groups`` requests. + + This class thinly wraps an initial + :class:`google.cloud.managedkafka_v1.types.ListConsumerGroupsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``consumer_groups`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListConsumerGroups`` requests and continue to iterate + through the ``consumer_groups`` field on the + corresponding responses. + + All the usual :class:`google.cloud.managedkafka_v1.types.ListConsumerGroupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
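+
+    Pages are fetched lazily: no additional ``ListConsumerGroups``
+    request is sent until iteration advances past the current page.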
+ """ + def __init__(self, + method: Callable[..., managed_kafka.ListConsumerGroupsResponse], + request: managed_kafka.ListConsumerGroupsRequest, + response: managed_kafka.ListConsumerGroupsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.managedkafka_v1.types.ListConsumerGroupsRequest): + The initial request object. + response (google.cloud.managedkafka_v1.types.ListConsumerGroupsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = managed_kafka.ListConsumerGroupsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[managed_kafka.ListConsumerGroupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, retry=self._retry, timeout=self._timeout, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[resources.ConsumerGroup]: + for page in self.pages: + yield from page.consumer_groups + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListConsumerGroupsAsyncPager: + """A pager for iterating through ``list_consumer_groups`` requests. + + This class thinly wraps an initial + :class:`google.cloud.managedkafka_v1.types.ListConsumerGroupsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``consumer_groups`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListConsumerGroups`` requests and continue to iterate + through the ``consumer_groups`` field on the + corresponding responses. + + All the usual :class:`google.cloud.managedkafka_v1.types.ListConsumerGroupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[managed_kafka.ListConsumerGroupsResponse]], + request: managed_kafka.ListConsumerGroupsRequest, + response: managed_kafka.ListConsumerGroupsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.managedkafka_v1.types.ListConsumerGroupsRequest): + The initial request object. + response (google.cloud.managedkafka_v1.types.ListConsumerGroupsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        self._method = method
+        self._request = managed_kafka.ListConsumerGroupsRequest(request)
+        self._response = response
+        self._retry = retry
+        self._timeout = timeout
+        self._metadata = metadata
+
+    def __getattr__(self, name: str) -> Any:
+        return getattr(self._response, name)
+
+    @property
+    async def pages(self) -> AsyncIterator[managed_kafka.ListConsumerGroupsResponse]:
+        yield self._response
+        while self._response.next_page_token:
+            self._request.page_token = self._response.next_page_token
+            self._response = await self._method(self._request, retry=self._retry, timeout=self._timeout, metadata=self._metadata)
+            yield self._response
+
+    def __aiter__(self) -> AsyncIterator[resources.ConsumerGroup]:
+        async def async_generator():
+            async for page in self.pages:
+                for response in page.consumer_groups:
+                    yield response
+
+        return async_generator()
+
+    def __repr__(self) -> str:
+        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/__init__.py
new file mode 100644
index 000000000000..bd18cf309409
--- /dev/null
+++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/__init__.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import ManagedKafkaTransport
+from .grpc import ManagedKafkaGrpcTransport
+from .grpc_asyncio import ManagedKafkaGrpcAsyncIOTransport
+from .rest import ManagedKafkaRestTransport
+from .rest import ManagedKafkaRestInterceptor
+
+
+# Compile a registry of transports.
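+# The registry maps a transport name to its class; the client resolves a
+# transport by name rather than importing it directly. An illustrative
+# lookup (names as defined below):
+#
+#     transport_cls = _transport_registry['grpc']  # ManagedKafkaGrpcTransport
+#     transport = transport_cls(host='managedkafka.googleapis.com')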
+_transport_registry = OrderedDict()  # type: Dict[str, Type[ManagedKafkaTransport]]
+_transport_registry['grpc'] = ManagedKafkaGrpcTransport
+_transport_registry['grpc_asyncio'] = ManagedKafkaGrpcAsyncIOTransport
+_transport_registry['rest'] = ManagedKafkaRestTransport
+
+__all__ = (
+    'ManagedKafkaTransport',
+    'ManagedKafkaGrpcTransport',
+    'ManagedKafkaGrpcAsyncIOTransport',
+    'ManagedKafkaRestTransport',
+    'ManagedKafkaRestInterceptor',
+)
diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/base.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/base.py
new file mode 100644
index 000000000000..06002a5a6f6a
--- /dev/null
+++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/base.py
@@ -0,0 +1,452 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import abc
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+
+from google.cloud.managedkafka_v1 import gapic_version as package_version
+
+import google.auth  # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import operations_v1
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.cloud.location import locations_pb2  # type: ignore
+from google.cloud.managedkafka_v1.types import managed_kafka
+from google.cloud.managedkafka_v1.types import resources
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__)
+
+
+class ManagedKafkaTransport(abc.ABC):
+    """Abstract transport class for ManagedKafka."""
+
+    AUTH_SCOPES = (
+        'https://www.googleapis.com/auth/cloud-platform',
+    )
+
+    DEFAULT_HOST: str = 'managedkafka.googleapis.com'
+
+    def __init__(
+            self, *,
+            host: str = DEFAULT_HOST,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            api_audience: Optional[str] = None,
+            **kwargs,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to (default: 'managedkafka.googleapis.com').
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+        """
+
+        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
+
+        # Save the scopes.
+        self._scopes = scopes
+        if not hasattr(self, "_ignore_credentials"):
+            self._ignore_credentials: bool = False
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                credentials_file,
+                **scopes_kwargs,
+                quota_project_id=quota_project_id
+            )
+        elif credentials is None and not self._ignore_credentials:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+            # Only default credentials get the GDCH audience applied here;
+            # credentials passed in by the user are left untouched.
+            if hasattr(credentials, "with_gdch_audience"):
+                credentials = credentials.with_gdch_audience(api_audience if api_audience else host)
+
+        # If the credentials are service account credentials, then always try to use self-signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+    @property
+    def host(self):
+        return self._host
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
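+        # Each entry below pairs an RPC with its default retry/timeout policy.
+        # For the retried methods, api_core sleeps roughly
+        #     min(initial * multiplier**n, maximum)
+        # seconds (with jitter) before attempt n + 1: about 1.0s, 1.3s, 1.69s,
+        # and so on, capped at 10.0s, giving up once the 60.0s deadline passes.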
+ self._wrapped_methods = { + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.list_topics: gapic_v1.method.wrap_method( + self.list_topics, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_topic: gapic_v1.method.wrap_method( + self.get_topic, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_topic: gapic_v1.method.wrap_method( + self.create_topic, + default_timeout=60.0, + client_info=client_info, + ), + self.update_topic: gapic_v1.method.wrap_method( + self.update_topic, + default_timeout=60.0, + client_info=client_info, + ), + self.delete_topic: gapic_v1.method.wrap_method( + self.delete_topic, + default_timeout=60.0, + client_info=client_info, + ), + self.list_consumer_groups: gapic_v1.method.wrap_method( + self.list_consumer_groups, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_consumer_group: gapic_v1.method.wrap_method( + self.get_consumer_group, + default_retry=retries.Retry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_consumer_group: gapic_v1.method.wrap_method( + self.update_consumer_group, + default_timeout=60.0, + client_info=client_info, + ), + self.delete_consumer_group: gapic_v1.method.wrap_method( + self.delete_consumer_group, + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def list_clusters(self) -> Callable[ + [managed_kafka.ListClustersRequest], + Union[ + managed_kafka.ListClustersResponse, + Awaitable[managed_kafka.ListClustersResponse] + ]]: + raise NotImplementedError() + + @property + def get_cluster(self) -> Callable[ + [managed_kafka.GetClusterRequest], + Union[ + resources.Cluster, + Awaitable[resources.Cluster] + ]]: + raise NotImplementedError() + + @property + def create_cluster(self) -> Callable[ + [managed_kafka.CreateClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_cluster(self) -> Callable[ + [managed_kafka.UpdateClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_cluster(self) -> Callable[ + [managed_kafka.DeleteClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_topics(self) -> Callable[ + [managed_kafka.ListTopicsRequest], + Union[ + managed_kafka.ListTopicsResponse, + Awaitable[managed_kafka.ListTopicsResponse] + ]]: + raise NotImplementedError() + + @property + def get_topic(self) -> Callable[ + [managed_kafka.GetTopicRequest], + Union[ + resources.Topic, + Awaitable[resources.Topic] + ]]: + raise NotImplementedError() + + @property + def create_topic(self) -> Callable[ + [managed_kafka.CreateTopicRequest], + Union[ + resources.Topic, + Awaitable[resources.Topic] + ]]: + raise NotImplementedError() + + @property + def update_topic(self) -> Callable[ + [managed_kafka.UpdateTopicRequest], + Union[ + resources.Topic, + Awaitable[resources.Topic] + ]]: + raise NotImplementedError() + + @property + def delete_topic(self) -> Callable[ + [managed_kafka.DeleteTopicRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def list_consumer_groups(self) -> Callable[ + [managed_kafka.ListConsumerGroupsRequest], + Union[ + managed_kafka.ListConsumerGroupsResponse, + Awaitable[managed_kafka.ListConsumerGroupsResponse] + ]]: + raise NotImplementedError() + + @property + def get_consumer_group(self) -> Callable[ + [managed_kafka.GetConsumerGroupRequest], + Union[ + resources.ConsumerGroup, + Awaitable[resources.ConsumerGroup] + ]]: + raise NotImplementedError() + + @property + def update_consumer_group(self) -> Callable[ + [managed_kafka.UpdateConsumerGroupRequest], + Union[ + resources.ConsumerGroup, + Awaitable[resources.ConsumerGroup] + ]]: + raise NotImplementedError() + + @property + def delete_consumer_group(self) -> Callable[ + [managed_kafka.DeleteConsumerGroupRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[operations_pb2.ListOperationsResponse, Awaitable[operations_pb2.ListOperationsResponse]], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + 
[operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def get_location(self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations(self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[locations_pb2.ListLocationsResponse, Awaitable[locations_pb2.ListLocationsResponse]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'ManagedKafkaTransport', +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/grpc.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/grpc.py new file mode 100644 index 000000000000..5d97ab8d82b2 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/grpc.py @@ -0,0 +1,740 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.location import locations_pb2 # type: ignore +from google.cloud.managedkafka_v1.types import managed_kafka +from google.cloud.managedkafka_v1.types import resources +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import ManagedKafkaTransport, DEFAULT_CLIENT_INFO + + +class ManagedKafkaGrpcTransport(ManagedKafkaTransport): + """gRPC backend transport for ManagedKafka. + + The service that a client application uses to manage Apache + Kafka clusters, topics and consumer groups. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
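+
+    A construction sketch (illustrative only; normally the client builds the
+    transport for you, Application Default Credentials are assumed, and
+    ``parent`` names a ``projects/*/locations/*`` resource)::
+
+        transport = ManagedKafkaGrpcTransport()          # default host + ADC
+        rpc = transport.list_clusters                    # raw stub callable
+        response = rpc(managed_kafka.ListClustersRequest(parent=parent))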
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'managedkafka.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'managedkafka.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client: Optional[operations_v1.OperationsClient] = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if isinstance(channel, grpc.Channel):
+            # Ignore credentials if a channel was passed.
+            credentials = None
+            self._ignore_credentials = True
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+
+        if not self._grpc_channel:
+            # initialize with the provided callable or the default channel
+            channel_init = channel or type(self).create_channel
+            self._grpc_channel = channel_init(
+                self._host,
+                # use the credentials which are saved
+                credentials=self._credentials,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'managedkafka.googleapis.com',
+                       credentials: Optional[ga_credentials.Credentials] = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def list_clusters(self) -> Callable[ + [managed_kafka.ListClustersRequest], + managed_kafka.ListClustersResponse]: + r"""Return a callable for the list clusters method over gRPC. + + Lists the clusters in a given project and location. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/ListClusters', + request_serializer=managed_kafka.ListClustersRequest.serialize, + response_deserializer=managed_kafka.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def get_cluster(self) -> Callable[ + [managed_kafka.GetClusterRequest], + resources.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Returns the properties of a single cluster. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/GetCluster', + request_serializer=managed_kafka.GetClusterRequest.serialize, + response_deserializer=resources.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def create_cluster(self) -> Callable[ + [managed_kafka.CreateClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a new cluster in a given project and + location. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
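+
+        The response is a raw :class:`~.operations_pb2.Operation`; one
+        illustrative way to resolve it (assuming ``transport`` is an instance
+        of this class) is through its ``operations_client``::
+
+            operation = transport.create_cluster(request)
+            latest = transport.operations_client.get_operation(operation.name)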
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/CreateCluster', + request_serializer=managed_kafka.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_cluster'] + + @property + def update_cluster(self) -> Callable[ + [managed_kafka.UpdateClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the properties of a single cluster. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/UpdateCluster', + request_serializer=managed_kafka.UpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_cluster'] + + @property + def delete_cluster(self) -> Callable[ + [managed_kafka.DeleteClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a single cluster. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_cluster' not in self._stubs: + self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/DeleteCluster', + request_serializer=managed_kafka.DeleteClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_cluster'] + + @property + def list_topics(self) -> Callable[ + [managed_kafka.ListTopicsRequest], + managed_kafka.ListTopicsResponse]: + r"""Return a callable for the list topics method over gRPC. + + Lists the topics in a given cluster. + + Returns: + Callable[[~.ListTopicsRequest], + ~.ListTopicsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_topics' not in self._stubs: + self._stubs['list_topics'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/ListTopics', + request_serializer=managed_kafka.ListTopicsRequest.serialize, + response_deserializer=managed_kafka.ListTopicsResponse.deserialize, + ) + return self._stubs['list_topics'] + + @property + def get_topic(self) -> Callable[ + [managed_kafka.GetTopicRequest], + resources.Topic]: + r"""Return a callable for the get topic method over gRPC. + + Returns the properties of a single topic. 
+ + Returns: + Callable[[~.GetTopicRequest], + ~.Topic]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_topic' not in self._stubs: + self._stubs['get_topic'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/GetTopic', + request_serializer=managed_kafka.GetTopicRequest.serialize, + response_deserializer=resources.Topic.deserialize, + ) + return self._stubs['get_topic'] + + @property + def create_topic(self) -> Callable[ + [managed_kafka.CreateTopicRequest], + resources.Topic]: + r"""Return a callable for the create topic method over gRPC. + + Creates a new topic in a given project and location. + + Returns: + Callable[[~.CreateTopicRequest], + ~.Topic]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_topic' not in self._stubs: + self._stubs['create_topic'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/CreateTopic', + request_serializer=managed_kafka.CreateTopicRequest.serialize, + response_deserializer=resources.Topic.deserialize, + ) + return self._stubs['create_topic'] + + @property + def update_topic(self) -> Callable[ + [managed_kafka.UpdateTopicRequest], + resources.Topic]: + r"""Return a callable for the update topic method over gRPC. + + Updates the properties of a single topic. + + Returns: + Callable[[~.UpdateTopicRequest], + ~.Topic]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_topic' not in self._stubs: + self._stubs['update_topic'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/UpdateTopic', + request_serializer=managed_kafka.UpdateTopicRequest.serialize, + response_deserializer=resources.Topic.deserialize, + ) + return self._stubs['update_topic'] + + @property + def delete_topic(self) -> Callable[ + [managed_kafka.DeleteTopicRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete topic method over gRPC. + + Deletes a single topic. + + Returns: + Callable[[~.DeleteTopicRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_topic' not in self._stubs: + self._stubs['delete_topic'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/DeleteTopic', + request_serializer=managed_kafka.DeleteTopicRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_topic'] + + @property + def list_consumer_groups(self) -> Callable[ + [managed_kafka.ListConsumerGroupsRequest], + managed_kafka.ListConsumerGroupsResponse]: + r"""Return a callable for the list consumer groups method over gRPC. + + Lists the consumer groups in a given cluster. 
+ + Returns: + Callable[[~.ListConsumerGroupsRequest], + ~.ListConsumerGroupsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_consumer_groups' not in self._stubs: + self._stubs['list_consumer_groups'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/ListConsumerGroups', + request_serializer=managed_kafka.ListConsumerGroupsRequest.serialize, + response_deserializer=managed_kafka.ListConsumerGroupsResponse.deserialize, + ) + return self._stubs['list_consumer_groups'] + + @property + def get_consumer_group(self) -> Callable[ + [managed_kafka.GetConsumerGroupRequest], + resources.ConsumerGroup]: + r"""Return a callable for the get consumer group method over gRPC. + + Returns the properties of a single consumer group. + + Returns: + Callable[[~.GetConsumerGroupRequest], + ~.ConsumerGroup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_consumer_group' not in self._stubs: + self._stubs['get_consumer_group'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/GetConsumerGroup', + request_serializer=managed_kafka.GetConsumerGroupRequest.serialize, + response_deserializer=resources.ConsumerGroup.deserialize, + ) + return self._stubs['get_consumer_group'] + + @property + def update_consumer_group(self) -> Callable[ + [managed_kafka.UpdateConsumerGroupRequest], + resources.ConsumerGroup]: + r"""Return a callable for the update consumer group method over gRPC. + + Updates the properties of a single consumer group. + + Returns: + Callable[[~.UpdateConsumerGroupRequest], + ~.ConsumerGroup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_consumer_group' not in self._stubs: + self._stubs['update_consumer_group'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/UpdateConsumerGroup', + request_serializer=managed_kafka.UpdateConsumerGroupRequest.serialize, + response_deserializer=resources.ConsumerGroup.deserialize, + ) + return self._stubs['update_consumer_group'] + + @property + def delete_consumer_group(self) -> Callable[ + [managed_kafka.DeleteConsumerGroupRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete consumer group method over gRPC. + + Deletes a single consumer group. + + Returns: + Callable[[~.DeleteConsumerGroupRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_consumer_group' not in self._stubs: + self._stubs['delete_consumer_group'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/DeleteConsumerGroup', + request_serializer=managed_kafka.DeleteConsumerGroupRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_consumer_group'] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'ManagedKafkaGrpcTransport', +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/grpc_asyncio.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/grpc_asyncio.py new file mode 100644 index 000000000000..bb18458a4ab1 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/grpc_asyncio.py @@ -0,0 +1,869 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import exceptions as core_exceptions +from google.api_core import retry_async as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.location import locations_pb2 # type: ignore +from google.cloud.managedkafka_v1.types import managed_kafka +from google.cloud.managedkafka_v1.types import resources +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import ManagedKafkaTransport, DEFAULT_CLIENT_INFO +from .grpc import ManagedKafkaGrpcTransport + + +class ManagedKafkaGrpcAsyncIOTransport(ManagedKafkaTransport): + """gRPC AsyncIO backend transport for ManagedKafka. 
+
+    The service that a client application uses to manage Apache
+    Kafka clusters, topics and consumer groups.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'managedkafka.googleapis.com',
+                       credentials: Optional[ga_credentials.Credentials] = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'managedkafka.googleapis.com',
+            credentials: Optional[ga_credentials.Credentials] = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
+            api_mtls_endpoint: Optional[str] = None,
+            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            api_audience: Optional[str] = None,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to (default: 'managedkafka.googleapis.com').
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if a ``channel`` instance is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if a ``channel`` instance is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service.
+                These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
+                A ``Channel`` instance through which to make calls, or a Callable
+                that constructs and returns one. If set to None, ``self.create_channel``
+                is used to create the channel. If a Callable is given, it will be called
+                with the same arguments as used in ``self.create_channel``.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if a ``channel`` instance is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if isinstance(channel, aio.Channel):
+            # Ignore credentials if a channel was passed.
+            credentials = None
+            self._ignore_credentials = True
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def list_clusters(self) -> Callable[ + [managed_kafka.ListClustersRequest], + Awaitable[managed_kafka.ListClustersResponse]]: + r"""Return a callable for the list clusters method over gRPC. + + Lists the clusters in a given project and location. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/ListClusters', + request_serializer=managed_kafka.ListClustersRequest.serialize, + response_deserializer=managed_kafka.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def get_cluster(self) -> Callable[ + [managed_kafka.GetClusterRequest], + Awaitable[resources.Cluster]]: + r"""Return a callable for the get cluster method over gRPC. + + Returns the properties of a single cluster. 
+ + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/GetCluster', + request_serializer=managed_kafka.GetClusterRequest.serialize, + response_deserializer=resources.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def create_cluster(self) -> Callable[ + [managed_kafka.CreateClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a new cluster in a given project and + location. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/CreateCluster', + request_serializer=managed_kafka.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_cluster'] + + @property + def update_cluster(self) -> Callable[ + [managed_kafka.UpdateClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the properties of a single cluster. + + Returns: + Callable[[~.UpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/UpdateCluster', + request_serializer=managed_kafka.UpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_cluster'] + + @property + def delete_cluster(self) -> Callable[ + [managed_kafka.DeleteClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a single cluster. + + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_cluster' not in self._stubs: + self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/DeleteCluster', + request_serializer=managed_kafka.DeleteClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_cluster'] + + @property + def list_topics(self) -> Callable[ + [managed_kafka.ListTopicsRequest], + Awaitable[managed_kafka.ListTopicsResponse]]: + r"""Return a callable for the list topics method over gRPC. + + Lists the topics in a given cluster. + + Returns: + Callable[[~.ListTopicsRequest], + Awaitable[~.ListTopicsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_topics' not in self._stubs: + self._stubs['list_topics'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/ListTopics', + request_serializer=managed_kafka.ListTopicsRequest.serialize, + response_deserializer=managed_kafka.ListTopicsResponse.deserialize, + ) + return self._stubs['list_topics'] + + @property + def get_topic(self) -> Callable[ + [managed_kafka.GetTopicRequest], + Awaitable[resources.Topic]]: + r"""Return a callable for the get topic method over gRPC. + + Returns the properties of a single topic. + + Returns: + Callable[[~.GetTopicRequest], + Awaitable[~.Topic]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_topic' not in self._stubs: + self._stubs['get_topic'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/GetTopic', + request_serializer=managed_kafka.GetTopicRequest.serialize, + response_deserializer=resources.Topic.deserialize, + ) + return self._stubs['get_topic'] + + @property + def create_topic(self) -> Callable[ + [managed_kafka.CreateTopicRequest], + Awaitable[resources.Topic]]: + r"""Return a callable for the create topic method over gRPC. + + Creates a new topic in a given project and location. + + Returns: + Callable[[~.CreateTopicRequest], + Awaitable[~.Topic]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_topic' not in self._stubs: + self._stubs['create_topic'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/CreateTopic', + request_serializer=managed_kafka.CreateTopicRequest.serialize, + response_deserializer=resources.Topic.deserialize, + ) + return self._stubs['create_topic'] + + @property + def update_topic(self) -> Callable[ + [managed_kafka.UpdateTopicRequest], + Awaitable[resources.Topic]]: + r"""Return a callable for the update topic method over gRPC. + + Updates the properties of a single topic. + + Returns: + Callable[[~.UpdateTopicRequest], + Awaitable[~.Topic]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_topic' not in self._stubs: + self._stubs['update_topic'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/UpdateTopic', + request_serializer=managed_kafka.UpdateTopicRequest.serialize, + response_deserializer=resources.Topic.deserialize, + ) + return self._stubs['update_topic'] + + @property + def delete_topic(self) -> Callable[ + [managed_kafka.DeleteTopicRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete topic method over gRPC. + + Deletes a single topic. + + Returns: + Callable[[~.DeleteTopicRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_topic' not in self._stubs: + self._stubs['delete_topic'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/DeleteTopic', + request_serializer=managed_kafka.DeleteTopicRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_topic'] + + @property + def list_consumer_groups(self) -> Callable[ + [managed_kafka.ListConsumerGroupsRequest], + Awaitable[managed_kafka.ListConsumerGroupsResponse]]: + r"""Return a callable for the list consumer groups method over gRPC. + + Lists the consumer groups in a given cluster. + + Returns: + Callable[[~.ListConsumerGroupsRequest], + Awaitable[~.ListConsumerGroupsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_consumer_groups' not in self._stubs: + self._stubs['list_consumer_groups'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/ListConsumerGroups', + request_serializer=managed_kafka.ListConsumerGroupsRequest.serialize, + response_deserializer=managed_kafka.ListConsumerGroupsResponse.deserialize, + ) + return self._stubs['list_consumer_groups'] + + @property + def get_consumer_group(self) -> Callable[ + [managed_kafka.GetConsumerGroupRequest], + Awaitable[resources.ConsumerGroup]]: + r"""Return a callable for the get consumer group method over gRPC. + + Returns the properties of a single consumer group. + + Returns: + Callable[[~.GetConsumerGroupRequest], + Awaitable[~.ConsumerGroup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_consumer_group' not in self._stubs: + self._stubs['get_consumer_group'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/GetConsumerGroup', + request_serializer=managed_kafka.GetConsumerGroupRequest.serialize, + response_deserializer=resources.ConsumerGroup.deserialize, + ) + return self._stubs['get_consumer_group'] + + @property + def update_consumer_group(self) -> Callable[ + [managed_kafka.UpdateConsumerGroupRequest], + Awaitable[resources.ConsumerGroup]]: + r"""Return a callable for the update consumer group method over gRPC. 
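# A hedged aside: the delete RPCs in this transport deserialize
# google.protobuf.Empty (empty_pb2.Empty.FromString), so at the client
# surface they return nothing and success is signaled by the absence of an
# exception. A sketch with a placeholder resource name and the assumed
# flattened ``name`` parameter.
from google.cloud import managedkafka_v1

async def delete_topic_example() -> None:
    client = managedkafka_v1.ManagedKafkaAsyncClient()
    await client.delete_topic(
        name="projects/my-project/locations/us-central1/clusters/my-cluster/topics/my-topic"
    )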
+ + Updates the properties of a single consumer group. + + Returns: + Callable[[~.UpdateConsumerGroupRequest], + Awaitable[~.ConsumerGroup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_consumer_group' not in self._stubs: + self._stubs['update_consumer_group'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/UpdateConsumerGroup', + request_serializer=managed_kafka.UpdateConsumerGroupRequest.serialize, + response_deserializer=resources.ConsumerGroup.deserialize, + ) + return self._stubs['update_consumer_group'] + + @property + def delete_consumer_group(self) -> Callable[ + [managed_kafka.DeleteConsumerGroupRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete consumer group method over gRPC. + + Deletes a single consumer group. + + Returns: + Callable[[~.DeleteConsumerGroupRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_consumer_group' not in self._stubs: + self._stubs['delete_consumer_group'] = self.grpc_channel.unary_unary( + '/google.cloud.managedkafka.v1.ManagedKafka/DeleteConsumerGroup', + request_serializer=managed_kafka.DeleteConsumerGroupRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_consumer_group'] + + def _prep_wrapped_messages(self, client_info): + """ Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.list_clusters: gapic_v1.method_async.wrap_method( + self.list_clusters, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_cluster: gapic_v1.method_async.wrap_method( + self.get_cluster, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_cluster: gapic_v1.method_async.wrap_method( + self.create_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.update_cluster: gapic_v1.method_async.wrap_method( + self.update_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method_async.wrap_method( + self.delete_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.list_topics: gapic_v1.method_async.wrap_method( + self.list_topics, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_topic: gapic_v1.method_async.wrap_method( + self.get_topic, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + 
deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_topic: gapic_v1.method_async.wrap_method( + self.create_topic, + default_timeout=60.0, + client_info=client_info, + ), + self.update_topic: gapic_v1.method_async.wrap_method( + self.update_topic, + default_timeout=60.0, + client_info=client_info, + ), + self.delete_topic: gapic_v1.method_async.wrap_method( + self.delete_topic, + default_timeout=60.0, + client_info=client_info, + ), + self.list_consumer_groups: gapic_v1.method_async.wrap_method( + self.list_consumer_groups, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_consumer_group: gapic_v1.method_async.wrap_method( + self.get_consumer_group, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=10.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_consumer_group: gapic_v1.method_async.wrap_method( + self.update_consumer_group, + default_timeout=60.0, + client_info=client_info, + ), + self.delete_consumer_group: gapic_v1.method_async.wrap_method( + self.delete_consumer_group, + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
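# A hedged sketch, not from the patch: the wrapped methods above retry only
# ServiceUnavailable, with exponential backoff (1.0s initial, x1.3 multiplier,
# 10s cap) under a 60s overall deadline, and those defaults can be overridden
# per call. Parameter values below are illustrative.
from google.api_core import exceptions as core_exceptions
from google.api_core import retry_async
from google.cloud import managedkafka_v1

async def list_clusters_with_custom_retry(parent: str):
    client = managedkafka_v1.ManagedKafkaAsyncClient()
    return await client.list_clusters(
        parent=parent,
        retry=retry_async.AsyncRetry(
            initial=0.5,            # start backoff at 0.5s instead of 1.0s
            maximum=5.0,            # cap individual sleeps at 5s
            multiplier=2.0,
            predicate=retry_async.if_exception_type(
                core_exceptions.ServiceUnavailable,
            ),
            timeout=30.0,           # give up retrying after 30s total
        ),
        timeout=30.0,
    )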
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse]: + r"""Return a callable for the list_operations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + +__all__ = ( + 'ManagedKafkaGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/rest.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/rest.py new file mode 100644 index 000000000000..8b3558245ac5 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/services/managed_kafka/transports/rest.py @@ -0,0 +1,2264 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.managedkafka_v1.types import managed_kafka +from google.cloud.managedkafka_v1.types import resources +from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +from .base import ManagedKafkaTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class ManagedKafkaRestInterceptor: + """Interceptor for ManagedKafka. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the ManagedKafkaRestTransport. + + .. 
code-block:: python + class MyCustomManagedKafkaInterceptor(ManagedKafkaRestInterceptor): + def pre_create_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_topic(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_topic(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_consumer_group(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_topic(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_get_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_consumer_group(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_consumer_group(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_topic(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_topic(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_clusters(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_clusters(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_consumer_groups(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_consumer_groups(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_topics(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_topics(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_consumer_group(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_consumer_group(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_topic(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_topic(self, response): + logging.log(f"Received response: {response}") + return response + + transport = ManagedKafkaRestTransport(interceptor=MyCustomManagedKafkaInterceptor()) + client = ManagedKafkaClient(transport=transport) + + + """ + def pre_create_cluster(self, request: managed_kafka.CreateClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.CreateClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor 
for create_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_create_cluster(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for create_cluster + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_create_topic(self, request: managed_kafka.CreateTopicRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.CreateTopicRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_topic + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_create_topic(self, response: resources.Topic) -> resources.Topic: + """Post-rpc interceptor for create_topic + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_delete_cluster(self, request: managed_kafka.DeleteClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.DeleteClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_delete_cluster(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_cluster + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_delete_consumer_group(self, request: managed_kafka.DeleteConsumerGroupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.DeleteConsumerGroupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_consumer_group + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def pre_delete_topic(self, request: managed_kafka.DeleteTopicRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.DeleteTopicRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_topic + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def pre_get_cluster(self, request: managed_kafka.GetClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.GetClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_get_cluster(self, response: resources.Cluster) -> resources.Cluster: + """Post-rpc interceptor for get_cluster + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. 
+ """ + return response + def pre_get_consumer_group(self, request: managed_kafka.GetConsumerGroupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.GetConsumerGroupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_consumer_group + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_get_consumer_group(self, response: resources.ConsumerGroup) -> resources.ConsumerGroup: + """Post-rpc interceptor for get_consumer_group + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_get_topic(self, request: managed_kafka.GetTopicRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.GetTopicRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_topic + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_get_topic(self, response: resources.Topic) -> resources.Topic: + """Post-rpc interceptor for get_topic + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_list_clusters(self, request: managed_kafka.ListClustersRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.ListClustersRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_clusters + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_list_clusters(self, response: managed_kafka.ListClustersResponse) -> managed_kafka.ListClustersResponse: + """Post-rpc interceptor for list_clusters + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_list_consumer_groups(self, request: managed_kafka.ListConsumerGroupsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.ListConsumerGroupsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_consumer_groups + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_list_consumer_groups(self, response: managed_kafka.ListConsumerGroupsResponse) -> managed_kafka.ListConsumerGroupsResponse: + """Post-rpc interceptor for list_consumer_groups + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_list_topics(self, request: managed_kafka.ListTopicsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.ListTopicsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_topics + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_list_topics(self, response: managed_kafka.ListTopicsResponse) -> managed_kafka.ListTopicsResponse: + """Post-rpc interceptor for list_topics + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. 
+ """ + return response + def pre_update_cluster(self, request: managed_kafka.UpdateClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.UpdateClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_update_cluster(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for update_cluster + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_update_consumer_group(self, request: managed_kafka.UpdateConsumerGroupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.UpdateConsumerGroupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_consumer_group + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_update_consumer_group(self, response: resources.ConsumerGroup) -> resources.ConsumerGroup: + """Post-rpc interceptor for update_consumer_group + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_update_topic(self, request: managed_kafka.UpdateTopicRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[managed_kafka.UpdateTopicRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_topic + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_update_topic(self, response: resources.Topic) -> resources.Topic: + """Post-rpc interceptor for update_topic + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + + def pre_get_location( + self, request: locations_pb2.GetLocationRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_list_locations( + self, request: locations_pb2.ListLocationsRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. 
+ """ + return response + def pre_cancel_operation( + self, request: operations_pb2.CancelOperationRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_cancel_operation( + self, response: None + ) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_delete_operation( + self, request: operations_pb2.DeleteOperationRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_delete_operation( + self, response: None + ) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_get_operation( + self, request: operations_pb2.GetOperationRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + def pre_list_operations( + self, request: operations_pb2.ListOperationsRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ManagedKafka server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the ManagedKafka server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class ManagedKafkaRestStub: + _session: AuthorizedSession + _host: str + _interceptor: ManagedKafkaRestInterceptor + + +class ManagedKafkaRestTransport(ManagedKafkaTransport): + """REST backend transport for ManagedKafka. + + The service that a client application uses to manage Apache + Kafka clusters, topics and consumer groups. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__(self, *, + host: str = 'managedkafka.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[ + ], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = 'https', + interceptor: Optional[ManagedKafkaRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'managedkafka.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or ManagedKafkaRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client.
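# A standalone re-creation of the host normalization performed in __init__
# above (with the regex's named groups as restored): a bare hostname gets the
# configured scheme prefixed, while a full URL passes through unchanged. The
# function name is illustrative.
import re

def normalize_host(host: str, url_scheme: str = "https") -> str:
    m = re.match(r"^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
    if m is None:
        raise ValueError(f"Unexpected hostname structure: {host}")
    return host if m.groupdict()["scheme"] else f"{url_scheme}://{host}"

assert normalize_host("managedkafka.googleapis.com") == "https://managedkafka.googleapis.com"
assert normalize_host("http://localhost:8080") == "http://localhost:8080"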
+ """ + # Only create a new client if we do not already have one. + if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + 'google.longrunning.Operations.CancelOperation': [ + { + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}:cancel', + 'body': '*', + }, + ], + 'google.longrunning.Operations.DeleteOperation': [ + { + 'method': 'delete', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}', + }, + ], + 'google.longrunning.Operations.GetOperation': [ + { + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}', + }, + ], + 'google.longrunning.Operations.ListOperations': [ + { + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*}/operations', + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1") + + self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) + + # Return the client from cache. + return self._operations_client + + class _CreateCluster(ManagedKafkaRestStub): + def __hash__(self): + return hash("CreateCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "clusterId" : "", } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.CreateClusterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the create cluster method over HTTP. + + Args: + request (~.managed_kafka.CreateClusterRequest): + The request object. Request for CreateCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{parent=projects/*/locations/*}/clusters', + 'body': 'cluster', + }, + ] + request, metadata = self._interceptor.pre_create_cluster(request, metadata) + pb_request = managed_kafka.CreateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_cluster(resp) + return resp + + class _CreateTopic(ManagedKafkaRestStub): + def __hash__(self): + return hash("CreateTopic") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "topicId" : "", } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.CreateTopicRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> resources.Topic: + r"""Call the create topic method over HTTP. + + Args: + request (~.managed_kafka.CreateTopicRequest): + The request object. Request for CreateTopic. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resources.Topic: + A Kafka topic in a given cluster. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{parent=projects/*/locations/*/clusters/*}/topics', + 'body': 'topic', + }, + ] + request, metadata = self._interceptor.pre_create_topic(request, metadata) + pb_request = managed_kafka.CreateTopicRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resources.Topic() + pb_resp = resources.Topic.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_topic(resp) + return resp + + class _DeleteCluster(ManagedKafkaRestStub): + def __hash__(self): + return hash("DeleteCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.DeleteClusterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the delete cluster method over HTTP. + + Args: + request (~.managed_kafka.DeleteClusterRequest): + The request object. Request for DeleteCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v1/{name=projects/*/locations/*/clusters/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_cluster(request, metadata) + pb_request = managed_kafka.DeleteClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_cluster(resp) + return resp + + class _DeleteConsumerGroup(ManagedKafkaRestStub): + def __hash__(self): + return hash("DeleteConsumerGroup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.DeleteConsumerGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ): + r"""Call the delete consumer group method over HTTP. + + Args: + request (~.managed_kafka.DeleteConsumerGroupRequest): + The request object. Request for DeleteConsumerGroup. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v1/{name=projects/*/locations/*/clusters/*/consumerGroups/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_consumer_group(request, metadata) + pb_request = managed_kafka.DeleteConsumerGroupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteTopic(ManagedKafkaRestStub): + def __hash__(self): + return hash("DeleteTopic") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.DeleteTopicRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ): + r"""Call the delete topic method over HTTP. + + Args: + request (~.managed_kafka.DeleteTopicRequest): + The request object. Request for DeleteTopic. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v1/{name=projects/*/locations/*/clusters/*/topics/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_topic(request, metadata) + pb_request = managed_kafka.DeleteTopicRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GetCluster(ManagedKafkaRestStub): + def __hash__(self): + return hash("GetCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.GetClusterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> resources.Cluster: + r"""Call the get cluster method over HTTP. + + Args: + request (~.managed_kafka.GetClusterRequest): + The request object. Request for GetCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resources.Cluster: + An Apache Kafka cluster deployed in a + location. 
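# A hedged caller-side sketch: non-2xx responses from every REST stub above
# are mapped by core_exceptions.from_http_response onto the
# GoogleAPICallError hierarchy, so a 404 surfaces as NotFound. The resource
# name is a placeholder and the flattened ``name`` parameter is assumed.
from google.api_core import exceptions as core_exceptions
from google.cloud import managedkafka_v1

def delete_consumer_group_if_present(name: str) -> None:
    client = managedkafka_v1.ManagedKafkaClient(transport="rest")
    try:
        client.delete_consumer_group(name=name)
    except core_exceptions.NotFound:
        pass  # already gone; treat the delete as idempotent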
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/clusters/*}', + }, + ] + request, metadata = self._interceptor.pre_get_cluster(request, metadata) + pb_request = managed_kafka.GetClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resources.Cluster() + pb_resp = resources.Cluster.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_cluster(resp) + return resp + + class _GetConsumerGroup(ManagedKafkaRestStub): + def __hash__(self): + return hash("GetConsumerGroup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.GetConsumerGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> resources.ConsumerGroup: + r"""Call the get consumer group method over HTTP. + + Args: + request (~.managed_kafka.GetConsumerGroupRequest): + The request object. Request for GetConsumerGroup. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resources.ConsumerGroup: + A Kafka consumer group in a given + cluster. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/clusters/*/consumerGroups/*}', + }, + ] + request, metadata = self._interceptor.pre_get_consumer_group(request, metadata) + pb_request = managed_kafka.GetConsumerGroupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resources.ConsumerGroup() + pb_resp = resources.ConsumerGroup.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_consumer_group(resp) + return resp + + class _GetTopic(ManagedKafkaRestStub): + def __hash__(self): + return hash("GetTopic") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.GetTopicRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> resources.Topic: + r"""Call the get topic method over HTTP. + + Args: + request (~.managed_kafka.GetTopicRequest): + The request object. Request for GetTopic. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resources.Topic: + A Kafka topic in a given cluster. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/clusters/*/topics/*}', + }, + ] + request, metadata = self._interceptor.pre_get_topic(request, metadata) + pb_request = managed_kafka.GetTopicRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resources.Topic() + pb_resp = resources.Topic.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_topic(resp) + return resp + + class _ListClusters(ManagedKafkaRestStub): + def __hash__(self): + return hash("ListClusters") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.ListClustersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> managed_kafka.ListClustersResponse: + r"""Call the list clusters method over HTTP. + + Args: + request (~.managed_kafka.ListClustersRequest): + The request object. Request for ListClusters. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.managed_kafka.ListClustersResponse: + Response for ListClusters. 
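+
+            A pagination sketch (illustrative only: the parent path is
+            hypothetical, and ``transport`` is assumed to be a
+            configured ``ManagedKafkaRestTransport``)::
+
+                request = managed_kafka.ListClustersRequest(
+                    parent="projects/my-project/locations/us-central1",
+                )
+                while True:
+                    page = transport.list_clusters(request)
+                    for cluster in page.clusters:
+                        print(cluster.name)
+                    if not page.next_page_token:
+                        break
+                    request.page_token = page.next_page_token
+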
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{parent=projects/*/locations/*}/clusters', + }, + ] + request, metadata = self._interceptor.pre_list_clusters(request, metadata) + pb_request = managed_kafka.ListClustersRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = managed_kafka.ListClustersResponse() + pb_resp = managed_kafka.ListClustersResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_clusters(resp) + return resp + + class _ListConsumerGroups(ManagedKafkaRestStub): + def __hash__(self): + return hash("ListConsumerGroups") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.ListConsumerGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> managed_kafka.ListConsumerGroupsResponse: + r"""Call the list consumer groups method over HTTP. + + Args: + request (~.managed_kafka.ListConsumerGroupsRequest): + The request object. Request for ListConsumerGroups. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.managed_kafka.ListConsumerGroupsResponse: + Response for ListConsumerGroups. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{parent=projects/*/locations/*/clusters/*}/consumerGroups', + }, + ] + request, metadata = self._interceptor.pre_list_consumer_groups(request, metadata) + pb_request = managed_kafka.ListConsumerGroupsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = managed_kafka.ListConsumerGroupsResponse() + pb_resp = managed_kafka.ListConsumerGroupsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_consumer_groups(resp) + return resp + + class _ListTopics(ManagedKafkaRestStub): + def __hash__(self): + return hash("ListTopics") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.ListTopicsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> managed_kafka.ListTopicsResponse: + r"""Call the list topics method over HTTP. + + Args: + request (~.managed_kafka.ListTopicsRequest): + The request object. Request for ListTopics. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.managed_kafka.ListTopicsResponse: + Response for ListTopics. 
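+
+            A single-page sketch (illustrative only: the parent path is
+            hypothetical, and ``transport`` is assumed to be a
+            configured ``ManagedKafkaRestTransport``)::
+
+                request = managed_kafka.ListTopicsRequest(
+                    parent="projects/my-project/locations/us-central1/clusters/my-cluster",
+                    page_size=50,
+                )
+                page = transport.list_topics(request)
+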
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{parent=projects/*/locations/*/clusters/*}/topics', + }, + ] + request, metadata = self._interceptor.pre_list_topics(request, metadata) + pb_request = managed_kafka.ListTopicsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = managed_kafka.ListTopicsResponse() + pb_resp = managed_kafka.ListTopicsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_topics(resp) + return resp + + class _UpdateCluster(ManagedKafkaRestStub): + def __hash__(self): + return hash("UpdateCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.UpdateClusterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the update cluster method over HTTP. + + Args: + request (~.managed_kafka.UpdateClusterRequest): + The request object. Request for UpdateCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
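+
+            A masked-update sketch (illustrative only: names and values
+            are hypothetical, and ``transport`` is assumed to be a
+            configured ``ManagedKafkaRestTransport``). Only fields named
+            in ``update_mask`` are overwritten, and this stub returns a
+            long-running Operation rather than the Cluster itself::
+
+                from google.protobuf import field_mask_pb2
+
+                request = managed_kafka.UpdateClusterRequest(
+                    cluster=resources.Cluster(
+                        name="projects/my-project/locations/us-central1/clusters/my-cluster",
+                        capacity_config=resources.CapacityConfig(
+                            vcpu_count=6,
+                            memory_bytes=6 * 1024 ** 3,
+                        ),
+                    ),
+                    update_mask=field_mask_pb2.FieldMask(
+                        paths=["capacity_config"],
+                    ),
+                )
+                operation = transport.update_cluster(request)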
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v1/{cluster.name=projects/*/locations/*/clusters/*}', + 'body': 'cluster', + }, + ] + request, metadata = self._interceptor.pre_update_cluster(request, metadata) + pb_request = managed_kafka.UpdateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_cluster(resp) + return resp + + class _UpdateConsumerGroup(ManagedKafkaRestStub): + def __hash__(self): + return hash("UpdateConsumerGroup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.UpdateConsumerGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> resources.ConsumerGroup: + r"""Call the update consumer group method over HTTP. + + Args: + request (~.managed_kafka.UpdateConsumerGroupRequest): + The request object. Request for UpdateConsumerGroup. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resources.ConsumerGroup: + A Kafka consumer group in a given + cluster. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v1/{consumer_group.name=projects/*/locations/*/clusters/*/consumerGroups/*}', + 'body': 'consumer_group', + }, + ] + request, metadata = self._interceptor.pre_update_consumer_group(request, metadata) + pb_request = managed_kafka.UpdateConsumerGroupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resources.ConsumerGroup() + pb_resp = resources.ConsumerGroup.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_consumer_group(resp) + return resp + + class _UpdateTopic(ManagedKafkaRestStub): + def __hash__(self): + return hash("UpdateTopic") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: managed_kafka.UpdateTopicRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> resources.Topic: + r"""Call the update topic method over HTTP. + + Args: + request (~.managed_kafka.UpdateTopicRequest): + The request object. Request for UpdateTopic. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.resources.Topic: + A Kafka topic in a given cluster. 
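+
+            A masked-update sketch (illustrative only: names and values
+            are hypothetical, and ``transport`` is assumed to be a
+            configured ``ManagedKafkaRestTransport``)::
+
+                from google.protobuf import field_mask_pb2
+
+                request = managed_kafka.UpdateTopicRequest(
+                    topic=resources.Topic(
+                        name="projects/my-project/locations/us-central1/clusters/my-cluster/topics/my-topic",
+                        partition_count=20,
+                    ),
+                    update_mask=field_mask_pb2.FieldMask(
+                        paths=["partition_count"],
+                    ),
+                )
+                topic = transport.update_topic(request)
+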
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v1/{topic.name=projects/*/locations/*/clusters/*/topics/*}', + 'body': 'topic', + }, + ] + request, metadata = self._interceptor.pre_update_topic(request, metadata) + pb_request = managed_kafka.UpdateTopicRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = resources.Topic() + pb_resp = resources.Topic.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_topic(resp) + return resp + + @property + def create_cluster(self) -> Callable[ + [managed_kafka.CreateClusterRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_topic(self) -> Callable[ + [managed_kafka.CreateTopicRequest], + resources.Topic]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateTopic(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_cluster(self) -> Callable[ + [managed_kafka.DeleteClusterRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_consumer_group(self) -> Callable[ + [managed_kafka.DeleteConsumerGroupRequest], + empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteConsumerGroup(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_topic(self) -> Callable[ + [managed_kafka.DeleteTopicRequest], + empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._DeleteTopic(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_cluster(self) -> Callable[ + [managed_kafka.GetClusterRequest], + resources.Cluster]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_consumer_group(self) -> Callable[ + [managed_kafka.GetConsumerGroupRequest], + resources.ConsumerGroup]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetConsumerGroup(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_topic(self) -> Callable[ + [managed_kafka.GetTopicRequest], + resources.Topic]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetTopic(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_clusters(self) -> Callable[ + [managed_kafka.ListClustersRequest], + managed_kafka.ListClustersResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListClusters(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_consumer_groups(self) -> Callable[ + [managed_kafka.ListConsumerGroupsRequest], + managed_kafka.ListConsumerGroupsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListConsumerGroups(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_topics(self) -> Callable[ + [managed_kafka.ListTopicsRequest], + managed_kafka.ListTopicsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListTopics(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_cluster(self) -> Callable[ + [managed_kafka.UpdateClusterRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_consumer_group(self) -> Callable[ + [managed_kafka.UpdateConsumerGroupRequest], + resources.ConsumerGroup]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateConsumerGroup(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_topic(self) -> Callable[ + [managed_kafka.UpdateTopicRequest], + resources.Topic]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateTopic(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(ManagedKafkaRestStub): + def __call__(self, + request: locations_pb2.GetLocationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*}', + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request['query_params'])) + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(ManagedKafkaRestStub): + def __call__(self, + request: locations_pb2.ListLocationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. 
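+
+            A mixin-call sketch (illustrative only: the project ID is
+            hypothetical, and ``transport`` is assumed to be a
+            configured ``ManagedKafkaRestTransport``)::
+
+                request = locations_pb2.ListLocationsRequest(
+                    name="projects/my-project",
+                )
+                response = transport.list_locations(request)
+                for location in response.locations:
+                    print(location.location_id)
+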
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*}/locations', + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request['query_params'])) + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(ManagedKafkaRestStub): + def __call__(self, + request: operations_pb2.CancelOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}:cancel', + 'body': '*', + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + body = json.dumps(transcoded_request['body']) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request['query_params'])) + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(ManagedKafkaRestStub): + def __call__(self, + request: operations_pb2.DeleteOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}', + }, + ] + + request, metadata = self._interceptor.pre_delete_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request['query_params'])) + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(ManagedKafkaRestStub): + def __call__(self, + request: operations_pb2.GetOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
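+
+            A polling sketch (illustrative only: the operation name is
+            hypothetical, ``transport`` is assumed to be a configured
+            ``ManagedKafkaRestTransport``, and a real caller would bound
+            the loop)::
+
+                import time
+
+                request = operations_pb2.GetOperationRequest(
+                    name="projects/my-project/locations/us-central1/operations/operation-123",
+                )
+                operation = transport.get_operation(request)
+                while not operation.done:
+                    time.sleep(10)
+                    operation = transport.get_operation(request)
+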
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*/operations/*}', + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request['query_params'])) + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(ManagedKafkaRestStub): + def __call__(self, + request: operations_pb2.ListOperationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v1/{name=projects/*/locations/*}/operations', + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request['query_params'])) + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__=( + 'ManagedKafkaRestTransport', +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/__init__.py new file mode 100644 index 000000000000..3cb9515ee5aa --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/__init__.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .managed_kafka import ( + CreateClusterRequest, + CreateTopicRequest, + DeleteClusterRequest, + DeleteConsumerGroupRequest, + DeleteTopicRequest, + GetClusterRequest, + GetConsumerGroupRequest, + GetTopicRequest, + ListClustersRequest, + ListClustersResponse, + ListConsumerGroupsRequest, + ListConsumerGroupsResponse, + ListTopicsRequest, + ListTopicsResponse, + UpdateClusterRequest, + UpdateConsumerGroupRequest, + UpdateTopicRequest, +) +from .resources import ( + AccessConfig, + CapacityConfig, + Cluster, + ConsumerGroup, + ConsumerPartitionMetadata, + ConsumerTopicMetadata, + GcpConfig, + NetworkConfig, + OperationMetadata, + RebalanceConfig, + Topic, +) + +__all__ = ( + 'CreateClusterRequest', + 'CreateTopicRequest', + 'DeleteClusterRequest', + 'DeleteConsumerGroupRequest', + 'DeleteTopicRequest', + 'GetClusterRequest', + 'GetConsumerGroupRequest', + 'GetTopicRequest', + 'ListClustersRequest', + 'ListClustersResponse', + 'ListConsumerGroupsRequest', + 'ListConsumerGroupsResponse', + 'ListTopicsRequest', + 'ListTopicsResponse', + 'UpdateClusterRequest', + 'UpdateConsumerGroupRequest', + 'UpdateTopicRequest', + 'AccessConfig', + 'CapacityConfig', + 'Cluster', + 'ConsumerGroup', + 'ConsumerPartitionMetadata', + 'ConsumerTopicMetadata', + 'GcpConfig', + 'NetworkConfig', + 'OperationMetadata', + 'RebalanceConfig', + 'Topic', +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/managed_kafka.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/managed_kafka.py new file mode 100644 index 000000000000..2c74a252fb0a --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/managed_kafka.py @@ -0,0 +1,578 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+from google.cloud.managedkafka_v1.types import resources
+from google.protobuf import field_mask_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.managedkafka.v1',
+    manifest={
+        'ListClustersRequest',
+        'ListClustersResponse',
+        'GetClusterRequest',
+        'CreateClusterRequest',
+        'UpdateClusterRequest',
+        'DeleteClusterRequest',
+        'ListTopicsRequest',
+        'ListTopicsResponse',
+        'GetTopicRequest',
+        'CreateTopicRequest',
+        'UpdateTopicRequest',
+        'DeleteTopicRequest',
+        'ListConsumerGroupsRequest',
+        'ListConsumerGroupsResponse',
+        'GetConsumerGroupRequest',
+        'UpdateConsumerGroupRequest',
+        'DeleteConsumerGroupRequest',
+    },
+)
+
+
+class ListClustersRequest(proto.Message):
+    r"""Request for ListClusters.
+
+    Attributes:
+        parent (str):
+            Required. The parent location whose clusters are to be
+            listed. Structured like
+            ``projects/{project}/locations/{location}``.
+        page_size (int):
+            Optional. The maximum number of clusters to
+            return. The service may return fewer than this
+            value. If unspecified, the server will pick an
+            appropriate default.
+        page_token (str):
+            Optional. A page token, received from a previous
+            ``ListClusters`` call. Provide this to retrieve the
+            subsequent page.
+
+            When paginating, all other parameters provided to
+            ``ListClusters`` must match the call that provided the page
+            token.
+        filter (str):
+            Optional. Filter expression for the result.
+        order_by (str):
+            Optional. Order by fields for the result.
+    """
+
+    parent: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_size: int = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    page_token: str = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    filter: str = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    order_by: str = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+
+
+class ListClustersResponse(proto.Message):
+    r"""Response for ListClusters.
+
+    Attributes:
+        clusters (MutableSequence[google.cloud.managedkafka_v1.types.Cluster]):
+            The list of Clusters in the requested parent.
+        next_page_token (str):
+            A token that can be sent as ``page_token`` to retrieve the
+            next page of results. If this field is omitted, there are no
+            more results.
+        unreachable (MutableSequence[str]):
+            Locations that could not be reached.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    clusters: MutableSequence[resources.Cluster] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=resources.Cluster,
+    )
+    next_page_token: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    unreachable: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=3,
+    )
+
+
+class GetClusterRequest(proto.Message):
+    r"""Request for GetCluster.
+
+    Attributes:
+        name (str):
+            Required. The name of the cluster whose
+            configuration to return.
+    """
+
+    name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreateClusterRequest(proto.Message):
+    r"""Request for CreateCluster.
+
+    Attributes:
+        parent (str):
+            Required.
The parent region in which to create the cluster. + Structured like ``projects/{project}/locations/{location}``. + cluster_id (str): + Required. The ID to use for the cluster, which will become + the final component of the cluster's name. The ID must be + 1-63 characters long, and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` to comply with RFC 1035. + + This value is structured like: ``my-cluster-id``. + cluster (google.cloud.managedkafka_v1.types.Cluster): + Required. Configuration of the cluster to create. Its + ``name`` field is ignored. + request_id (str): + Optional. An optional request ID to identify + requests. Specify a unique request ID to avoid + duplication of requests. If a request times out + or fails, retrying with the same ID allows the + server to recognize the previous attempt. For at + least 60 minutes, the server ignores duplicate + requests bearing the same ID. + + For example, consider a situation where you make + an initial request and the request times out. If + you make the request again with the same request + ID within 60 minutes of the last request, the + server checks if an original operation with the + same request ID was received. If so, the server + ignores the second request. + + The request ID must be a valid UUID. A zero UUID + is not supported + (00000000-0000-0000-0000-000000000000). + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + cluster_id: str = proto.Field( + proto.STRING, + number=2, + ) + cluster: resources.Cluster = proto.Field( + proto.MESSAGE, + number=3, + message=resources.Cluster, + ) + request_id: str = proto.Field( + proto.STRING, + number=4, + ) + + +class UpdateClusterRequest(proto.Message): + r"""Request for UpdateCluster. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the cluster resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. The mask is required and a value of \* + will update all fields. + cluster (google.cloud.managedkafka_v1.types.Cluster): + Required. The cluster to update. Its ``name`` field must be + populated. + request_id (str): + Optional. An optional request ID to identify + requests. Specify a unique request ID to avoid + duplication of requests. If a request times out + or fails, retrying with the same ID allows the + server to recognize the previous attempt. For at + least 60 minutes, the server ignores duplicate + requests bearing the same ID. + + For example, consider a situation where you make + an initial request and the request times out. If + you make the request again with the same request + ID within 60 minutes of the last request, the + server checks if an original operation with the + same request ID was received. If so, the server + ignores the second request. + + The request ID must be a valid UUID. A zero UUID + is not supported + (00000000-0000-0000-0000-000000000000). + """ + + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + cluster: resources.Cluster = proto.Field( + proto.MESSAGE, + number=2, + message=resources.Cluster, + ) + request_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class DeleteClusterRequest(proto.Message): + r"""Request for DeleteCluster. + + Attributes: + name (str): + Required. The name of the cluster to delete. 
+        request_id (str):
+            Optional. An optional request ID to identify
+            requests. Specify a unique request ID to avoid
+            duplication of requests. If a request times out
+            or fails, retrying with the same ID allows the
+            server to recognize the previous attempt. For at
+            least 60 minutes, the server ignores duplicate
+            requests bearing the same ID.
+
+            For example, consider a situation where you make
+            an initial request and the request times out. If
+            you make the request again with the same request
+            ID within 60 minutes of the last request, the
+            server checks if an original operation with the
+            same request ID was received. If so, the server
+            ignores the second request.
+
+            The request ID must be a valid UUID. A zero UUID
+            is not supported
+            (00000000-0000-0000-0000-000000000000).
+    """
+
+    name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    request_id: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class ListTopicsRequest(proto.Message):
+    r"""Request for ListTopics.
+
+    Attributes:
+        parent (str):
+            Required. The parent cluster whose topics are to be listed.
+            Structured like
+            ``projects/{project}/locations/{location}/clusters/{cluster}``.
+        page_size (int):
+            Optional. The maximum number of topics to
+            return. The service may return fewer than this
+            value. If unset or zero, all topics for the
+            parent are returned.
+        page_token (str):
+            Optional. A page token, received from a previous
+            ``ListTopics`` call. Provide this to retrieve the subsequent
+            page.
+
+            When paginating, all other parameters provided to
+            ``ListTopics`` must match the call that provided the page
+            token.
+    """
+
+    parent: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_size: int = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    page_token: str = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+class ListTopicsResponse(proto.Message):
+    r"""Response for ListTopics.
+
+    Attributes:
+        topics (MutableSequence[google.cloud.managedkafka_v1.types.Topic]):
+            The list of topics in the requested parent.
+            The order of the topics is unspecified.
+        next_page_token (str):
+            A token that can be sent as ``page_token`` to retrieve the
+            next page of results. If this field is omitted, there are no
+            more results.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    topics: MutableSequence[resources.Topic] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=resources.Topic,
+    )
+    next_page_token: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class GetTopicRequest(proto.Message):
+    r"""Request for GetTopic.
+
+    Attributes:
+        name (str):
+            Required. The name of the topic whose
+            configuration to return. Structured like:
+
+            projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}.
+    """
+
+    name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreateTopicRequest(proto.Message):
+    r"""Request for CreateTopic.
+
+    Attributes:
+        parent (str):
+            Required. The parent cluster in which to create the topic.
+            Structured like
+            ``projects/{project}/locations/{location}/clusters/{cluster}``.
+        topic_id (str):
+            Required. The ID to use for the topic, which will become the
+            final component of the topic's name.
+
+            This value is structured like: ``my-topic-name``.
+        topic (google.cloud.managedkafka_v1.types.Topic):
+            Required. Configuration of the topic to create. Its ``name``
+            field is ignored.
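+
+    A construction sketch (illustrative only; names and values are
+    hypothetical)::
+
+        topic = resources.Topic(
+            partition_count=10,
+            replication_factor=3,
+            configs={"cleanup.policy": "compact"},
+        )
+        request = CreateTopicRequest(
+            parent="projects/my-project/locations/us-central1/clusters/my-cluster",
+            topic_id="my-topic",
+            topic=topic,
+        )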
+    """
+
+    parent: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    topic_id: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    topic: resources.Topic = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message=resources.Topic,
+    )
+
+
+class UpdateTopicRequest(proto.Message):
+    r"""Request for UpdateTopic.
+
+    Attributes:
+        update_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Required. Field mask is used to specify the fields to be
+            overwritten in the Topic resource by the update. The fields
+            specified in the update_mask are relative to the resource,
+            not the full request. A field will be overwritten if it is
+            in the mask. The mask is required and a value of \* will
+            update all fields.
+        topic (google.cloud.managedkafka_v1.types.Topic):
+            Required. The topic to update. Its ``name`` field must be
+            populated.
+    """
+
+    update_mask: field_mask_pb2.FieldMask = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=field_mask_pb2.FieldMask,
+    )
+    topic: resources.Topic = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=resources.Topic,
+    )
+
+
+class DeleteTopicRequest(proto.Message):
+    r"""Request for DeleteTopic.
+
+    Attributes:
+        name (str):
+            Required. The name of the topic to delete.
+            ``projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}``.
+    """
+
+    name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ListConsumerGroupsRequest(proto.Message):
+    r"""Request for ListConsumerGroups.
+
+    Attributes:
+        parent (str):
+            Required. The parent cluster whose consumer groups are to be
+            listed. Structured like
+            ``projects/{project}/locations/{location}/clusters/{cluster}``.
+        page_size (int):
+            Optional. The maximum number of consumer
+            groups to return. The service may return fewer
+            than this value. If unset or zero, all consumer
+            groups for the parent are returned.
+        page_token (str):
+            Optional. A page token, received from a previous
+            ``ListConsumerGroups`` call. Provide this to retrieve the
+            subsequent page.
+
+            When paginating, all other parameters provided to
+            ``ListConsumerGroups`` must match the call that provided the
+            page token.
+    """
+
+    parent: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_size: int = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    page_token: str = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+class ListConsumerGroupsResponse(proto.Message):
+    r"""Response for ListConsumerGroups.
+
+    Attributes:
+        consumer_groups (MutableSequence[google.cloud.managedkafka_v1.types.ConsumerGroup]):
+            The list of consumer groups in the requested
+            parent. The order of the consumer groups is
+            unspecified.
+        next_page_token (str):
+            A token that can be sent as ``page_token`` to retrieve the
+            next page of results. If this field is omitted, there are no
+            more results.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    consumer_groups: MutableSequence[resources.ConsumerGroup] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=resources.ConsumerGroup,
+    )
+    next_page_token: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class GetConsumerGroupRequest(proto.Message):
+    r"""Request for GetConsumerGroup.
+
+    Attributes:
+        name (str):
+            Required. The name of the consumer group whose configuration
+            to return.
+            ``projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}``.
+    """
+
+    name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class UpdateConsumerGroupRequest(proto.Message):
+    r"""Request for UpdateConsumerGroup.
+ + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the ConsumerGroup resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. The mask is required and a value of \* + will update all fields. + consumer_group (google.cloud.managedkafka_v1.types.ConsumerGroup): + Required. The consumer group to update. Its ``name`` field + must be populated. + """ + + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + consumer_group: resources.ConsumerGroup = proto.Field( + proto.MESSAGE, + number=2, + message=resources.ConsumerGroup, + ) + + +class DeleteConsumerGroupRequest(proto.Message): + r"""Request for DeleteConsumerGroup. + + Attributes: + name (str): + Required. The name of the consumer group to delete. + ``projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumerGroup}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/resources.py b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/resources.py new file mode 100644 index 000000000000..8442be6f9050 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/google/cloud/managedkafka_v1/types/resources.py @@ -0,0 +1,438 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.managedkafka.v1', + manifest={ + 'Cluster', + 'CapacityConfig', + 'RebalanceConfig', + 'NetworkConfig', + 'AccessConfig', + 'GcpConfig', + 'Topic', + 'ConsumerTopicMetadata', + 'ConsumerPartitionMetadata', + 'ConsumerGroup', + 'OperationMetadata', + }, +) + + +class Cluster(proto.Message): + r"""An Apache Kafka cluster deployed in a location. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcp_config (google.cloud.managedkafka_v1.types.GcpConfig): + Required. Configuration properties for a + Kafka cluster deployed to Google Cloud Platform. + + This field is a member of `oneof`_ ``platform_config``. + name (str): + Identifier. The name of the cluster. Structured like: + projects/{project_number}/locations/{location}/clusters/{cluster_id} + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when the cluster was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when the cluster was + last updated. + labels (MutableMapping[str, str]): + Optional. 
Labels as key value pairs. + capacity_config (google.cloud.managedkafka_v1.types.CapacityConfig): + Required. Capacity configuration for the + Kafka cluster. + rebalance_config (google.cloud.managedkafka_v1.types.RebalanceConfig): + Optional. Rebalance configuration for the + Kafka cluster. + state (google.cloud.managedkafka_v1.types.Cluster.State): + Output only. The current state of the + cluster. + """ + class State(proto.Enum): + r"""The state of the cluster. + + Values: + STATE_UNSPECIFIED (0): + A state was not specified. + CREATING (1): + The cluster is being created. + ACTIVE (2): + The cluster is active. + DELETING (3): + The cluster is being deleted. + """ + STATE_UNSPECIFIED = 0 + CREATING = 1 + ACTIVE = 2 + DELETING = 3 + + gcp_config: 'GcpConfig' = proto.Field( + proto.MESSAGE, + number=9, + oneof='platform_config', + message='GcpConfig', + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + capacity_config: 'CapacityConfig' = proto.Field( + proto.MESSAGE, + number=5, + message='CapacityConfig', + ) + rebalance_config: 'RebalanceConfig' = proto.Field( + proto.MESSAGE, + number=8, + message='RebalanceConfig', + ) + state: State = proto.Field( + proto.ENUM, + number=10, + enum=State, + ) + + +class CapacityConfig(proto.Message): + r"""A capacity configuration of a Kafka cluster. + + Attributes: + vcpu_count (int): + Required. The number of vCPUs to provision + for the cluster. Minimum: 3. + memory_bytes (int): + Required. The memory to provision for the + cluster in bytes. The CPU:memory ratio + (vCPU:GiB) must be between 1:1 and 1:8. Minimum: + 3221225472 (3 GiB). + """ + + vcpu_count: int = proto.Field( + proto.INT64, + number=1, + ) + memory_bytes: int = proto.Field( + proto.INT64, + number=2, + ) + + +class RebalanceConfig(proto.Message): + r"""Defines rebalancing behavior of a Kafka cluster. + + Attributes: + mode (google.cloud.managedkafka_v1.types.RebalanceConfig.Mode): + Optional. The rebalance behavior for the cluster. When not + specified, defaults to ``NO_REBALANCE``. + """ + class Mode(proto.Enum): + r"""The partition rebalance mode for the cluster. + + Values: + MODE_UNSPECIFIED (0): + A mode was not specified. Do not use. + NO_REBALANCE (1): + Do not rebalance automatically. + AUTO_REBALANCE_ON_SCALE_UP (2): + Automatically rebalance topic partitions + among brokers when the cluster is scaled up. + """ + MODE_UNSPECIFIED = 0 + NO_REBALANCE = 1 + AUTO_REBALANCE_ON_SCALE_UP = 2 + + mode: Mode = proto.Field( + proto.ENUM, + number=1, + enum=Mode, + ) + + +class NetworkConfig(proto.Message): + r"""The configuration of a Virtual Private Cloud (VPC) network + that can access the Kafka cluster. + + Attributes: + subnet (str): + Required. Name of the VPC subnet in which to create Private + Service Connect (PSC) endpoints for the Kafka brokers and + bootstrap address. Structured like: + projects/{project}/regions/{region}/subnetworks/{subnet_id} + + The subnet must be located in the same region as the Kafka + cluster. The project may differ. Multiple subnets from the + same parent network must not be specified. 
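+
+            For example (an illustrative path; the project, region, and
+            subnet names below are placeholders):
+            projects/my-project/regions/us-central1/subnetworks/my-subnet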
+ + The CIDR range of the subnet must be within the IPv4 address + ranges for private networks, as specified in RFC 1918. + """ + + subnet: str = proto.Field( + proto.STRING, + number=2, + ) + + +class AccessConfig(proto.Message): + r"""The configuration of access to the Kafka cluster. + + Attributes: + network_configs (MutableSequence[google.cloud.managedkafka_v1.types.NetworkConfig]): + Required. Virtual Private Cloud (VPC) + networks that must be granted direct access to + the Kafka cluster. Minimum of 1 network is + required. Maximum 10 networks can be specified. + """ + + network_configs: MutableSequence['NetworkConfig'] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='NetworkConfig', + ) + + +class GcpConfig(proto.Message): + r"""Configuration properties for a Kafka cluster deployed to + Google Cloud Platform. + + Attributes: + access_config (google.cloud.managedkafka_v1.types.AccessConfig): + Required. Access configuration for the Kafka + cluster. + kms_key (str): + Optional. Immutable. The Cloud KMS Key name to use for + encryption. The key must be located in the same region as + the cluster and cannot be changed. Structured like: + projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}. + """ + + access_config: 'AccessConfig' = proto.Field( + proto.MESSAGE, + number=3, + message='AccessConfig', + ) + kms_key: str = proto.Field( + proto.STRING, + number=2, + ) + + +class Topic(proto.Message): + r"""A Kafka topic in a given cluster. + + Attributes: + name (str): + Identifier. The name of the topic. The ``topic`` segment is + used when connecting directly to the cluster. Structured + like: + projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + partition_count (int): + Required. The number of partitions this topic + has. The partition count can only be increased, + not decreased. Please note that if partitions + are increased for a topic that has a key, the + partitioning logic or the ordering of the + messages will be affected. + replication_factor (int): + Required. Immutable. The number of replicas + of each partition. A replication factor of 3 is + recommended for high availability. + configs (MutableMapping[str, str]): + Optional. Configurations for the topic that are overridden + from the cluster defaults. The key of the map is a Kafka + topic property name, for example: ``cleanup.policy``, + ``compression.type``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + partition_count: int = proto.Field( + proto.INT32, + number=2, + ) + replication_factor: int = proto.Field( + proto.INT32, + number=3, + ) + configs: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + + +class ConsumerTopicMetadata(proto.Message): + r"""Metadata for a consumer group corresponding to a specific + topic. + + Attributes: + partitions (MutableMapping[int, google.cloud.managedkafka_v1.types.ConsumerPartitionMetadata]): + Optional. Metadata for this consumer group + and topic for all partition indexes it has + metadata for. + """ + + partitions: MutableMapping[int, 'ConsumerPartitionMetadata'] = proto.MapField( + proto.INT32, + proto.MESSAGE, + number=1, + message='ConsumerPartitionMetadata', + ) + + +class ConsumerPartitionMetadata(proto.Message): + r"""Metadata for a consumer group corresponding to a specific + partition. + + Attributes: + offset (int): + Required. The current offset for this + partition, or 0 if no offset has been committed. + metadata (str): + Optional. 
The associated metadata for this + partition, or empty if it does not exist. + """ + + offset: int = proto.Field( + proto.INT64, + number=1, + ) + metadata: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ConsumerGroup(proto.Message): + r"""A Kafka consumer group in a given cluster. + + Attributes: + name (str): + Identifier. The name of the consumer group. The + ``consumer_group`` segment is used when connecting directly + to the cluster. Structured like: + projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group} + topics (MutableMapping[str, google.cloud.managedkafka_v1.types.ConsumerTopicMetadata]): + Optional. Metadata for this consumer group + for all topics it has metadata for. The key of + the map is a topic name, structured like: + + projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic} + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + topics: MutableMapping[str, 'ConsumerTopicMetadata'] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message='ConsumerTopicMetadata', + ) + + +class OperationMetadata(proto.Message): + r"""Represents the metadata of the long-running operation. + + Attributes: + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time the operation was + created. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time the operation finished + running. + target (str): + Output only. Server-defined resource path for + the target of the operation. + verb (str): + Output only. Name of the verb executed by the + operation. + status_message (str): + Output only. Human-readable status of the + operation, if any. + requested_cancellation (bool): + Output only. Identifies whether the user has requested + cancellation of the operation. Operations that have been + cancelled successfully have [Operation.error][] value with a + [google.rpc.Status.code][google.rpc.Status.code] of 1, + corresponding to ``Code.CANCELLED``. + api_version (str): + Output only. API version used to start the + operation. + """ + + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + target: str = proto.Field( + proto.STRING, + number=3, + ) + verb: str = proto.Field( + proto.STRING, + number=4, + ) + status_message: str = proto.Field( + proto.STRING, + number=5, + ) + requested_cancellation: bool = proto.Field( + proto.BOOL, + number=6, + ) + api_version: str = proto.Field( + proto.STRING, + number=7, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/mypy.ini b/owl-bot-staging/google-cloud-managedkafka/v1/mypy.ini new file mode 100644 index 000000000000..574c5aed394b --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.7 +namespace_packages = True diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/noxfile.py b/owl-bot-staging/google-cloud-managedkafka/v1/noxfile.py new file mode 100644 index 000000000000..ab019beb4fc0 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/noxfile.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import pathlib
+import re
+import shutil
+import subprocess
+import sys
+
+
+import nox  # type: ignore
+
+ALL_PYTHON = [
+    "3.7",
+    "3.8",
+    "3.9",
+    "3.10",
+    "3.11",
+    "3.12"
+]
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = 'google-cloud-managedkafka'
+
+BLACK_VERSION = "black==22.3.0"
+BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"]
+DEFAULT_PYTHON_VERSION = "3.12"
+
+nox.sessions = [
+    "unit",
+    "cover",
+    "mypy",
+    "check_lower_bounds",
+    # exclude update_lower_bounds from default
+    "docs",
+    "blacken",
+    "lint",
+    "prerelease_deps",
+]
+
+@nox.session(python=ALL_PYTHON)
+@nox.parametrize(
+    "protobuf_implementation",
+    [ "python", "upb", "cpp" ],
+)
+def unit(session, protobuf_implementation):
+    """Run the unit test suite."""
+
+    if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12"):
+        session.skip("cpp implementation is not supported in python 3.11+")
+
+    session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"')
+    session.install('-e', '.', "-c", f"testing/constraints-{session.python}.txt")
+
+    # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped.
+    # The 'cpp' implementation requires Protobuf<4.
+    if protobuf_implementation == "cpp":
+        session.install("protobuf<4")
+
+    session.run(
+        'py.test',
+        '--quiet',
+        '--cov=google/cloud/managedkafka_v1/',
+        '--cov=tests/',
+        '--cov-config=.coveragerc',
+        '--cov-report=term',
+        '--cov-report=html',
+        os.path.join('tests', 'unit', ''.join(session.posargs)),
+        env={
+            "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+        },
+    )
+
+@nox.session(python=ALL_PYTHON[-1])
+@nox.parametrize(
+    "protobuf_implementation",
+    [ "python", "upb", "cpp" ],
+)
+def prerelease_deps(session, protobuf_implementation):
+    """Run the unit test suite against pre-release versions of dependencies."""
+
+    if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12"):
+        session.skip("cpp implementation is not supported in python 3.11+")
+
+    # Install test environment dependencies
+    session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"')
+
+    # Install the package without dependencies
+    session.install('-e', '.', '--no-deps')
+
+    # We test the minimum dependency versions using the minimum Python
+    # version so the lowest python runtime that we test has a corresponding constraints
+    # file, located at `testing/constraints-<minimum python version>.txt`, which
+    # contains all of the dependencies and extras.
+    with open(
+        CURRENT_DIRECTORY
+        / "testing"
+        / f"constraints-{ALL_PYTHON[0]}.txt",
+        encoding="utf-8",
+    ) as constraints_file:
+        constraints_text = constraints_file.read()
+
+    # Ignore leading whitespace and comment lines.
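+    # Each match captures a package name from a pinned line of the form
+    # "package==version"; the lookahead requires an exact "==" pin, so
+    # unpinned or commented requirements are never selected.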
+ constraints_deps = [ + match.group(1) + for match in re.finditer( + r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE + ) + ] + + session.install(*constraints_deps) + + prerel_deps = [ + "googleapis-common-protos", + "google-api-core", + "google-auth", + "grpcio", + "grpcio-status", + "protobuf", + "proto-plus", + ] + + for dep in prerel_deps: + session.install("--pre", "--no-deps", "--upgrade", dep) + + # Remaining dependencies + other_deps = [ + "requests", + ] + session.install(*other_deps) + + # Print out prerelease package versions + + session.run("python", "-c", "import google.api_core; print(google.api_core.__version__)") + session.run("python", "-c", "import google.auth; print(google.auth.__version__)") + session.run("python", "-c", "import grpc; print(grpc.__version__)") + session.run( + "python", "-c", "import google.protobuf; print(google.protobuf.__version__)" + ) + session.run( + "python", "-c", "import proto; print(proto.__version__)" + ) + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/managedkafka_v1/', + '--cov=tests/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)), + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=ALL_PYTHON) +def mypy(session): + """Run the type checker.""" + session.install( + 'mypy', + 'types-requests', + 'types-protobuf' + ) + session.install('.') + session.run( + 'mypy', + '-p', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx==7.0.1", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. 
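+
+    Note that Black runs here in check-only mode; the ``blacken`` session
+    applies the formatting instead.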
+ """ + session.install("flake8", BLACK_VERSION) + session.run( + "black", + "--check", + *BLACK_PATHS, + ) + session.run("flake8", "google", "tests", "samples") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *BLACK_PATHS, + ) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_cluster_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_cluster_async.py new file mode 100644 index 000000000000..79d7b5375b03 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_cluster_async.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_CreateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_create_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + cluster = managedkafka_v1.Cluster() + cluster.gcp_config.access_config.network_configs.subnet = "subnet_value" + cluster.capacity_config.vcpu_count = 1094 + cluster.capacity_config.memory_bytes = 1311 + + request = managedkafka_v1.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=cluster, + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_CreateCluster_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_cluster_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_cluster_sync.py new file mode 100644 index 000000000000..434b20484afc --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_cluster_sync.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_CreateCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_create_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + cluster = managedkafka_v1.Cluster() + cluster.gcp_config.access_config.network_configs.subnet = "subnet_value" + cluster.capacity_config.vcpu_count = 1094 + cluster.capacity_config.memory_bytes = 1311 + + request = managedkafka_v1.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=cluster, + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_CreateCluster_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_topic_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_topic_async.py new file mode 100644 index 000000000000..8b360e8118a3 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_topic_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTopic +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_CreateTopic_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_create_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + topic = managedkafka_v1.Topic() + topic.partition_count = 1634 + topic.replication_factor = 1912 + + request = managedkafka_v1.CreateTopicRequest( + parent="parent_value", + topic_id="topic_id_value", + topic=topic, + ) + + # Make the request + response = await client.create_topic(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_CreateTopic_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_topic_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_topic_sync.py new file mode 100644 index 000000000000..f6d5af066487 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_create_topic_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTopic +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_CreateTopic_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_create_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + topic = managedkafka_v1.Topic() + topic.partition_count = 1634 + topic.replication_factor = 1912 + + request = managedkafka_v1.CreateTopicRequest( + parent="parent_value", + topic_id="topic_id_value", + topic=topic, + ) + + # Make the request + response = client.create_topic(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_CreateTopic_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_cluster_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_cluster_async.py new file mode 100644 index 000000000000..2bd140df30fd --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_cluster_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_DeleteCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_delete_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_DeleteCluster_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_cluster_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_cluster_sync.py new file mode 100644 index 000000000000..80e7233c911d --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_cluster_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_DeleteCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_delete_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_DeleteCluster_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_consumer_group_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_consumer_group_async.py new file mode 100644 index 000000000000..a028587e98b0 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_consumer_group_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteConsumerGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_DeleteConsumerGroup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_delete_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteConsumerGroupRequest( + name="name_value", + ) + + # Make the request + await client.delete_consumer_group(request=request) + + +# [END managedkafka_v1_generated_ManagedKafka_DeleteConsumerGroup_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_consumer_group_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_consumer_group_sync.py new file mode 100644 index 000000000000..39dbfea7b954 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_consumer_group_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteConsumerGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_DeleteConsumerGroup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_delete_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteConsumerGroupRequest( + name="name_value", + ) + + # Make the request + client.delete_consumer_group(request=request) + + +# [END managedkafka_v1_generated_ManagedKafka_DeleteConsumerGroup_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_topic_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_topic_async.py new file mode 100644 index 000000000000..314d0637ea83 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_topic_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTopic +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_DeleteTopic_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_delete_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteTopicRequest( + name="name_value", + ) + + # Make the request + await client.delete_topic(request=request) + + +# [END managedkafka_v1_generated_ManagedKafka_DeleteTopic_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_topic_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_topic_sync.py new file mode 100644 index 000000000000..55ca1768a6a7 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_delete_topic_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTopic +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_DeleteTopic_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_delete_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.DeleteTopicRequest( + name="name_value", + ) + + # Make the request + client.delete_topic(request=request) + + +# [END managedkafka_v1_generated_ManagedKafka_DeleteTopic_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_cluster_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_cluster_async.py new file mode 100644 index 000000000000..0a976de646d7 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_cluster_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_GetCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_get_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = await client.get_cluster(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_GetCluster_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_cluster_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_cluster_sync.py new file mode 100644 index 000000000000..91e0634abb3f --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_cluster_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_GetCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_get_cluster(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = client.get_cluster(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_GetCluster_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_consumer_group_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_consumer_group_async.py new file mode 100644 index 000000000000..a2c263fe1e4c --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_consumer_group_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetConsumerGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_GetConsumerGroup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_get_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetConsumerGroupRequest( + name="name_value", + ) + + # Make the request + response = await client.get_consumer_group(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_GetConsumerGroup_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_consumer_group_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_consumer_group_sync.py new file mode 100644 index 000000000000..57864a86c774 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_consumer_group_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetConsumerGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_GetConsumerGroup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_get_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetConsumerGroupRequest( + name="name_value", + ) + + # Make the request + response = client.get_consumer_group(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_GetConsumerGroup_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_topic_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_topic_async.py new file mode 100644 index 000000000000..6f12e700c5cf --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_topic_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTopic +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_GetTopic_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_get_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetTopicRequest( + name="name_value", + ) + + # Make the request + response = await client.get_topic(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_GetTopic_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_topic_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_topic_sync.py new file mode 100644 index 000000000000..7eadf98d88b8 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_get_topic_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTopic +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_GetTopic_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_get_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.GetTopicRequest( + name="name_value", + ) + + # Make the request + response = client.get_topic(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_GetTopic_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_clusters_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_clusters_async.py new file mode 100644 index 000000000000..c83b4c8c6e2d --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_clusters_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListClusters +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_ListClusters_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import managedkafka_v1
+
+
+async def sample_list_clusters():
+    # Create a client
+    client = managedkafka_v1.ManagedKafkaAsyncClient()
+
+    # Initialize request argument(s)
+    request = managedkafka_v1.ListClustersRequest(
+        parent="parent_value",
+    )
+
+    # Make the request (the async client call must be awaited to obtain the pager)
+    page_result = await client.list_clusters(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END managedkafka_v1_generated_ManagedKafka_ListClusters_async]
diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_clusters_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_clusters_sync.py
new file mode 100644
index 000000000000..cea10cc11f0c
--- /dev/null
+++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_clusters_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListClusters
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-managedkafka
+
+
+# [START managedkafka_v1_generated_ManagedKafka_ListClusters_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_list_clusters(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_clusters(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_ListClusters_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_consumer_groups_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_consumer_groups_async.py new file mode 100644 index 000000000000..8642c6a36dc6 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_consumer_groups_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListConsumerGroups +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_ListConsumerGroups_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
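+#   For example, ListConsumerGroupsRequest.parent expects a cluster path of the
+#   form "projects/{project}/locations/{location}/clusters/{cluster}"; the
+#   "parent_value" placeholder below must be replaced accordingly.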
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import managedkafka_v1
+
+
+async def sample_list_consumer_groups():
+    # Create a client
+    client = managedkafka_v1.ManagedKafkaAsyncClient()
+
+    # Initialize request argument(s)
+    request = managedkafka_v1.ListConsumerGroupsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request (the async client call must be awaited to obtain the pager)
+    page_result = await client.list_consumer_groups(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END managedkafka_v1_generated_ManagedKafka_ListConsumerGroups_async]
diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_consumer_groups_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_consumer_groups_sync.py
new file mode 100644
index 000000000000..e84fb24f13e2
--- /dev/null
+++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_consumer_groups_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListConsumerGroups
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-managedkafka
+
+
+# [START managedkafka_v1_generated_ManagedKafka_ListConsumerGroups_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_list_consumer_groups(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.ListConsumerGroupsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_consumer_groups(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_ListConsumerGroups_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_topics_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_topics_async.py new file mode 100644 index 000000000000..2709cdd159ae --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_topics_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTopics +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_ListTopics_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
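+#   For example, ListTopicsRequest.parent expects a cluster path of the form
+#   "projects/{project}/locations/{location}/clusters/{cluster}"; the
+#   "parent_value" placeholder below must be replaced accordingly.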
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import managedkafka_v1
+
+
+async def sample_list_topics():
+    # Create a client
+    client = managedkafka_v1.ManagedKafkaAsyncClient()
+
+    # Initialize request argument(s)
+    request = managedkafka_v1.ListTopicsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request (the async client call must be awaited to obtain the pager)
+    page_result = await client.list_topics(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END managedkafka_v1_generated_ManagedKafka_ListTopics_async]
diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_topics_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_topics_sync.py
new file mode 100644
index 000000000000..6fb3377eda05
--- /dev/null
+++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_list_topics_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTopics
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-managedkafka
+
+
+# [START managedkafka_v1_generated_ManagedKafka_ListTopics_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_list_topics(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.ListTopicsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_topics(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_ListTopics_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_cluster_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_cluster_async.py new file mode 100644 index 000000000000..6924fca95523 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_cluster_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_UpdateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
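+#   For example, cluster.name must identify an existing cluster of the form
+#   "projects/{project}/locations/{location}/clusters/{cluster}", and a real
+#   update would typically also set UpdateClusterRequest.update_mask to the
+#   fields being changed; the numeric values below are placeholders.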
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import managedkafka_v1
+
+
+async def sample_update_cluster():
+    # Create a client
+    client = managedkafka_v1.ManagedKafkaAsyncClient()
+
+    # Initialize request argument(s)
+    cluster = managedkafka_v1.Cluster()
+    cluster.gcp_config.access_config.network_configs = [managedkafka_v1.NetworkConfig(subnet="subnet_value")]  # repeated field
+    cluster.capacity_config.vcpu_count = 1094
+    cluster.capacity_config.memory_bytes = 1311
+
+    request = managedkafka_v1.UpdateClusterRequest(
+        cluster=cluster,
+    )
+
+    # Make the request (the async client call must be awaited to obtain the operation)
+    operation = await client.update_cluster(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END managedkafka_v1_generated_ManagedKafka_UpdateCluster_async]
diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_cluster_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_cluster_sync.py
new file mode 100644
index 000000000000..4ac865e49d44
--- /dev/null
+++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_cluster_sync.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-managedkafka
+
+
+# [START managedkafka_v1_generated_ManagedKafka_UpdateCluster_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import managedkafka_v1
+
+
+def sample_update_cluster():
+    # Create a client
+    client = managedkafka_v1.ManagedKafkaClient()
+
+    # Initialize request argument(s)
+    cluster = managedkafka_v1.Cluster()
+    cluster.gcp_config.access_config.network_configs = [managedkafka_v1.NetworkConfig(subnet="subnet_value")]  # repeated field
+    cluster.capacity_config.vcpu_count = 1094
+    cluster.capacity_config.memory_bytes = 1311
+
+    request = managedkafka_v1.UpdateClusterRequest(
+        cluster=cluster,
+    )
+
+    # Make the request
+    operation = client.update_cluster(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END managedkafka_v1_generated_ManagedKafka_UpdateCluster_sync]
diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_consumer_group_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_consumer_group_async.py
new file mode 100644
index 000000000000..9c64f598b850
--- /dev/null
+++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_consumer_group_async.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateConsumerGroup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-managedkafka
+
+
+# [START managedkafka_v1_generated_ManagedKafka_UpdateConsumerGroup_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
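+#   For example, a real request would set consumer_group, whose name field
+#   ("projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group}")
+#   identifies the group to update, and typically an update_mask as well.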
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_update_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + request = managedkafka_v1.UpdateConsumerGroupRequest( + ) + + # Make the request + response = await client.update_consumer_group(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_UpdateConsumerGroup_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_consumer_group_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_consumer_group_sync.py new file mode 100644 index 000000000000..bc96b7c4c2ac --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_consumer_group_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateConsumerGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_UpdateConsumerGroup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_update_consumer_group(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + request = managedkafka_v1.UpdateConsumerGroupRequest( + ) + + # Make the request + response = client.update_consumer_group(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_UpdateConsumerGroup_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_topic_async.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_topic_async.py new file mode 100644 index 000000000000..d922730296be --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_topic_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTopic +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_UpdateTopic_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
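+#   For example, topic.name must identify an existing topic of the form
+#   "projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}",
+#   and a real update would typically also set update_mask; the numeric values
+#   below are placeholders.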
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +async def sample_update_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaAsyncClient() + + # Initialize request argument(s) + topic = managedkafka_v1.Topic() + topic.partition_count = 1634 + topic.replication_factor = 1912 + + request = managedkafka_v1.UpdateTopicRequest( + topic=topic, + ) + + # Make the request + response = await client.update_topic(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_UpdateTopic_async] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_topic_sync.py b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_topic_sync.py new file mode 100644 index 000000000000..64f56b52277e --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/managedkafka_v1_generated_managed_kafka_update_topic_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTopic +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-managedkafka + + +# [START managedkafka_v1_generated_ManagedKafka_UpdateTopic_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import managedkafka_v1 + + +def sample_update_topic(): + # Create a client + client = managedkafka_v1.ManagedKafkaClient() + + # Initialize request argument(s) + topic = managedkafka_v1.Topic() + topic.partition_count = 1634 + topic.replication_factor = 1912 + + request = managedkafka_v1.UpdateTopicRequest( + topic=topic, + ) + + # Make the request + response = client.update_topic(request=request) + + # Handle the response + print(response) + +# [END managedkafka_v1_generated_ManagedKafka_UpdateTopic_sync] diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/snippet_metadata_google.cloud.managedkafka.v1.json b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/snippet_metadata_google.cloud.managedkafka.v1.json new file mode 100644 index 000000000000..37b09a104f58 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/samples/generated_samples/snippet_metadata_google.cloud.managedkafka.v1.json @@ -0,0 +1,2313 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.cloud.managedkafka.v1", + "version": "v1" + } + ], + "language": "PYTHON", + "name": "google-cloud-managedkafka", + "version": "0.1.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.create_cluster", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.CreateCluster", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "CreateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.CreateClusterRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "cluster", + "type": "google.cloud.managedkafka_v1.types.Cluster" + }, + { + "name": "cluster_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_cluster" + }, + "description": "Sample for CreateCluster", + "file": "managedkafka_v1_generated_managed_kafka_create_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_CreateCluster_async", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 52, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 59, + "start": 53, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 60, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_create_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.create_cluster", + "method": { + "fullName": 
"google.cloud.managedkafka.v1.ManagedKafka.CreateCluster", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "CreateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.CreateClusterRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "cluster", + "type": "google.cloud.managedkafka_v1.types.Cluster" + }, + { + "name": "cluster_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_cluster" + }, + "description": "Sample for CreateCluster", + "file": "managedkafka_v1_generated_managed_kafka_create_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_CreateCluster_sync", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 52, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 59, + "start": 53, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 60, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_create_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.create_topic", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.CreateTopic", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "CreateTopic" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.CreateTopicRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "topic", + "type": "google.cloud.managedkafka_v1.types.Topic" + }, + { + "name": "topic_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.Topic", + "shortName": "create_topic" + }, + "description": "Sample for CreateTopic", + "file": "managedkafka_v1_generated_managed_kafka_create_topic_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_CreateTopic_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_create_topic_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": 
"google.cloud.managedkafka_v1.ManagedKafkaClient.create_topic", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.CreateTopic", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "CreateTopic" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.CreateTopicRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "topic", + "type": "google.cloud.managedkafka_v1.types.Topic" + }, + { + "name": "topic_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.Topic", + "shortName": "create_topic" + }, + "description": "Sample for CreateTopic", + "file": "managedkafka_v1_generated_managed_kafka_create_topic_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_CreateTopic_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_create_topic_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.delete_cluster", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.DeleteCluster", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "DeleteCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.DeleteClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_cluster" + }, + "description": "Sample for DeleteCluster", + "file": "managedkafka_v1_generated_managed_kafka_delete_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_DeleteCluster_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_delete_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.delete_cluster", + "method": { + 
"fullName": "google.cloud.managedkafka.v1.ManagedKafka.DeleteCluster", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "DeleteCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.DeleteClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_cluster" + }, + "description": "Sample for DeleteCluster", + "file": "managedkafka_v1_generated_managed_kafka_delete_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_DeleteCluster_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_delete_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.delete_consumer_group", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.DeleteConsumerGroup", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "DeleteConsumerGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.DeleteConsumerGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_consumer_group" + }, + "description": "Sample for DeleteConsumerGroup", + "file": "managedkafka_v1_generated_managed_kafka_delete_consumer_group_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_DeleteConsumerGroup_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_delete_consumer_group_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.delete_consumer_group", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.DeleteConsumerGroup", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "DeleteConsumerGroup" + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.DeleteConsumerGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_consumer_group" + }, + "description": "Sample for DeleteConsumerGroup", + "file": "managedkafka_v1_generated_managed_kafka_delete_consumer_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_DeleteConsumerGroup_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_delete_consumer_group_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.delete_topic", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.DeleteTopic", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "DeleteTopic" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.DeleteTopicRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": "delete_topic" + }, + "description": "Sample for DeleteTopic", + "file": "managedkafka_v1_generated_managed_kafka_delete_topic_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_DeleteTopic_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_delete_topic_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.delete_topic", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.DeleteTopic", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "DeleteTopic" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.DeleteTopicRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "shortName": 
"delete_topic" + }, + "description": "Sample for DeleteTopic", + "file": "managedkafka_v1_generated_managed_kafka_delete_topic_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_DeleteTopic_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_delete_topic_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.get_cluster", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.GetCluster", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "GetCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.GetClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.Cluster", + "shortName": "get_cluster" + }, + "description": "Sample for GetCluster", + "file": "managedkafka_v1_generated_managed_kafka_get_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_GetCluster_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_get_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.get_cluster", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.GetCluster", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "GetCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.GetClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.Cluster", + "shortName": "get_cluster" + }, + "description": "Sample for GetCluster", + "file": "managedkafka_v1_generated_managed_kafka_get_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_GetCluster_sync", + "segments": 
[ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_get_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.get_consumer_group", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.GetConsumerGroup", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "GetConsumerGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.GetConsumerGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.ConsumerGroup", + "shortName": "get_consumer_group" + }, + "description": "Sample for GetConsumerGroup", + "file": "managedkafka_v1_generated_managed_kafka_get_consumer_group_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_GetConsumerGroup_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_get_consumer_group_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.get_consumer_group", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.GetConsumerGroup", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "GetConsumerGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.GetConsumerGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.ConsumerGroup", + "shortName": "get_consumer_group" + }, + "description": "Sample for GetConsumerGroup", + "file": "managedkafka_v1_generated_managed_kafka_get_consumer_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_GetConsumerGroup_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + 
"start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_get_consumer_group_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.get_topic", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.GetTopic", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "GetTopic" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.GetTopicRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.Topic", + "shortName": "get_topic" + }, + "description": "Sample for GetTopic", + "file": "managedkafka_v1_generated_managed_kafka_get_topic_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_GetTopic_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_get_topic_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.get_topic", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.GetTopic", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "GetTopic" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.GetTopicRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.Topic", + "shortName": "get_topic" + }, + "description": "Sample for GetTopic", + "file": "managedkafka_v1_generated_managed_kafka_get_topic_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_GetTopic_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"managedkafka_v1_generated_managed_kafka_get_topic_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.list_clusters", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.ListClusters", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "ListClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.ListClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListClustersAsyncPager", + "shortName": "list_clusters" + }, + "description": "Sample for ListClusters", + "file": "managedkafka_v1_generated_managed_kafka_list_clusters_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_ListClusters_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_list_clusters_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.list_clusters", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.ListClusters", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "ListClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.ListClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListClustersPager", + "shortName": "list_clusters" + }, + "description": "Sample for ListClusters", + "file": "managedkafka_v1_generated_managed_kafka_list_clusters_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_ListClusters_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_list_clusters_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": 
{ + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.list_consumer_groups", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.ListConsumerGroups", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "ListConsumerGroups" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.ListConsumerGroupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListConsumerGroupsAsyncPager", + "shortName": "list_consumer_groups" + }, + "description": "Sample for ListConsumerGroups", + "file": "managedkafka_v1_generated_managed_kafka_list_consumer_groups_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_ListConsumerGroups_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_list_consumer_groups_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.list_consumer_groups", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.ListConsumerGroups", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "ListConsumerGroups" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.ListConsumerGroupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListConsumerGroupsPager", + "shortName": "list_consumer_groups" + }, + "description": "Sample for ListConsumerGroups", + "file": "managedkafka_v1_generated_managed_kafka_list_consumer_groups_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_ListConsumerGroups_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_list_consumer_groups_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + 
"fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.list_topics", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.ListTopics", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "ListTopics" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.ListTopicsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListTopicsAsyncPager", + "shortName": "list_topics" + }, + "description": "Sample for ListTopics", + "file": "managedkafka_v1_generated_managed_kafka_list_topics_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_ListTopics_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_list_topics_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.list_topics", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.ListTopics", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "ListTopics" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.ListTopicsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.services.managed_kafka.pagers.ListTopicsPager", + "shortName": "list_topics" + }, + "description": "Sample for ListTopics", + "file": "managedkafka_v1_generated_managed_kafka_list_topics_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_ListTopics_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_list_topics_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": 
"google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.update_cluster", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.UpdateCluster", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "UpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.UpdateClusterRequest" + }, + { + "name": "cluster", + "type": "google.cloud.managedkafka_v1.types.Cluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_cluster" + }, + "description": "Sample for UpdateCluster", + "file": "managedkafka_v1_generated_managed_kafka_update_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_UpdateCluster_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_update_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.update_cluster", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.UpdateCluster", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "UpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.UpdateClusterRequest" + }, + { + "name": "cluster", + "type": "google.cloud.managedkafka_v1.types.Cluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_cluster" + }, + "description": "Sample for UpdateCluster", + "file": "managedkafka_v1_generated_managed_kafka_update_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_UpdateCluster_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_update_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + 
"shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.update_consumer_group", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.UpdateConsumerGroup", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "UpdateConsumerGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.UpdateConsumerGroupRequest" + }, + { + "name": "consumer_group", + "type": "google.cloud.managedkafka_v1.types.ConsumerGroup" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.ConsumerGroup", + "shortName": "update_consumer_group" + }, + "description": "Sample for UpdateConsumerGroup", + "file": "managedkafka_v1_generated_managed_kafka_update_consumer_group_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_UpdateConsumerGroup_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_update_consumer_group_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.update_consumer_group", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.UpdateConsumerGroup", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "UpdateConsumerGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.UpdateConsumerGroupRequest" + }, + { + "name": "consumer_group", + "type": "google.cloud.managedkafka_v1.types.ConsumerGroup" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.ConsumerGroup", + "shortName": "update_consumer_group" + }, + "description": "Sample for UpdateConsumerGroup", + "file": "managedkafka_v1_generated_managed_kafka_update_consumer_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_UpdateConsumerGroup_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"managedkafka_v1_generated_managed_kafka_update_consumer_group_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient", + "shortName": "ManagedKafkaAsyncClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaAsyncClient.update_topic", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.UpdateTopic", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "UpdateTopic" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.UpdateTopicRequest" + }, + { + "name": "topic", + "type": "google.cloud.managedkafka_v1.types.Topic" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.Topic", + "shortName": "update_topic" + }, + "description": "Sample for UpdateTopic", + "file": "managedkafka_v1_generated_managed_kafka_update_topic_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_UpdateTopic_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_update_topic_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient", + "shortName": "ManagedKafkaClient" + }, + "fullName": "google.cloud.managedkafka_v1.ManagedKafkaClient.update_topic", + "method": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka.UpdateTopic", + "service": { + "fullName": "google.cloud.managedkafka.v1.ManagedKafka", + "shortName": "ManagedKafka" + }, + "shortName": "UpdateTopic" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.managedkafka_v1.types.UpdateTopicRequest" + }, + { + "name": "topic", + "type": "google.cloud.managedkafka_v1.types.Topic" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.managedkafka_v1.types.Topic", + "shortName": "update_topic" + }, + "description": "Sample for UpdateTopic", + "file": "managedkafka_v1_generated_managed_kafka_update_topic_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "managedkafka_v1_generated_ManagedKafka_UpdateTopic_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "managedkafka_v1_generated_managed_kafka_update_topic_sync.py" + } + ] +} diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/scripts/fixup_managedkafka_v1_keywords.py b/owl-bot-staging/google-cloud-managedkafka/v1/scripts/fixup_managedkafka_v1_keywords.py new file mode 100644 index 000000000000..8eafa521a8b4 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/scripts/fixup_managedkafka_v1_keywords.py @@ -0,0 +1,189 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class managedkafkaCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'create_cluster': ('parent', 'cluster_id', 'cluster', 'request_id', ), + 'create_topic': ('parent', 'topic_id', 'topic', ), + 'delete_cluster': ('name', 'request_id', ), + 'delete_consumer_group': ('name', ), + 'delete_topic': ('name', ), + 'get_cluster': ('name', ), + 'get_consumer_group': ('name', ), + 'get_topic': ('name', ), + 'list_clusters': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ), + 'list_consumer_groups': ('parent', 'page_size', 'page_token', ), + 'list_topics': ('parent', 'page_size', 'page_token', ), + 'update_cluster': ('update_mask', 'cluster', 'request_id', ), + 'update_consumer_group': ('update_mask', 'consumer_group', ), + 'update_topic': ('update_mask', 'topic', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=managedkafkaCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the managedkafka client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/setup.py b/owl-bot-staging/google-cloud-managedkafka/v1/setup.py new file mode 100644 index 000000000000..bab4b7a3c5dc --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/setup.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import io +import os +import re + +import setuptools # type: ignore + +package_root = os.path.abspath(os.path.dirname(__file__)) + +name = 'google-cloud-managedkafka' + + +description = "Google Cloud Managedkafka API client library" + +version = None + +with open(os.path.join(package_root, 'google/cloud/managedkafka/gapic_version.py')) as fp: + version_candidates = re.findall(r"(?<=\")\d+.\d+.\d+(?=\")", fp.read()) + assert (len(version_candidates) == 1) + version = version_candidates[0] + +if version[0] == "0": + release_status = "Development Status :: 4 - Beta" +else: + release_status = "Development Status :: 5 - Production/Stable" + +dependencies = [ + "google-api-core[grpc] >= 1.34.1, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", + # Exclude incompatible versions of `google-auth` + # See https://github.com/googleapis/google-cloud-python/issues/12364 + "google-auth >= 2.14.1, <3.0.0dev,!=2.24.0,!=2.25.0", + "proto-plus >= 1.22.3, <2.0.0dev", + "protobuf>=3.20.2,<6.0.0dev,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", +] +url = "https://github.com/googleapis/google-cloud-python/tree/main/packages/google-cloud-managedkafka" + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: + readme = readme_file.read() + +packages = [ + package + for package in setuptools.find_namespace_packages() + if package.startswith("google") +] + +setuptools.setup( + name=name, + version=version, + description=description, + long_description=readme, + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url=url, + classifiers=[ + release_status, + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Operating System :: OS Independent", + "Topic :: Internet", + ], + platforms="Posix; MacOS X; Windows", + packages=packages, + python_requires=">=3.7", + install_requires=dependencies, + include_package_data=True, + zip_safe=False, +) diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.10.txt b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.10.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.10.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.11.txt b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.11.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.11.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. 
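An aside on the version-extraction logic in setup.py above: the regex pulls the quoted dotted version out of gapic_version.py and asserts that exactly one candidate was found; a leading major version of 0 then selects the Beta trove classifier, anything else the Stable one. A self-contained check of the pattern, with an illustrative sample line:

```python
import re

# gapic_version.py carries a single quoted version string, roughly:
sample = '__version__ = "0.1.2"  # {x-release-please-version}\n'

# Same pattern as setup.py: a lookbehind/lookahead pair anchors the match
# between double quotes. The unescaped dots technically match any character,
# but only a dotted version ever sits between the quotes, so it is harmless.
candidates = re.findall(r"(?<=\")\d+.\d+.\d+(?=\")", sample)
assert candidates == ["0.1.2"]
```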
+google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.12.txt b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.12.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.12.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.7.txt b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.7.txt new file mode 100644 index 000000000000..fc812592b0ee --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.7.txt @@ -0,0 +1,10 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List all library dependencies and extras in this file. +# Pin the version to the lower bound. +# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", +# Then this file should have google-cloud-foo==1.14.0 +google-api-core==1.34.1 +google-auth==2.14.1 +proto-plus==1.22.3 +protobuf==3.20.2 diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.8.txt b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.8.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.8.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.9.txt b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.9.txt new file mode 100644 index 000000000000..ed7f9aed2559 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/testing/constraints-3.9.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/tests/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/tests/__init__.py new file mode 100644 index 000000000000..7b3de3117f38 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
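Worth noting how these constraints files are consumed: the 3.7 file pins every dependency to the exact lower bound declared in setup.py, so installing with standard pip constraints usage, e.g. `pip install -e . -c testing/constraints-3.7.txt` (a conventional command, not something this patch adds), verifies that the advertised minimum versions actually work, while the files for newer Python versions deliberately leave the dependencies unpinned so the latest releases are exercised.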
+# diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/__init__.py new file mode 100644 index 000000000000..7b3de3117f38 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/__init__.py new file mode 100644 index 000000000000..7b3de3117f38 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/managedkafka_v1/__init__.py b/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/managedkafka_v1/__init__.py new file mode 100644 index 000000000000..7b3de3117f38 --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/managedkafka_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/managedkafka_v1/test_managed_kafka.py b/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/managedkafka_v1/test_managed_kafka.py new file mode 100644 index 000000000000..341031debe4f --- /dev/null +++ b/owl-bot-staging/google-cloud-managedkafka/v1/tests/unit/gapic/managedkafka_v1/test_managed_kafka.py @@ -0,0 +1,12567 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.location import locations_pb2 +from google.cloud.managedkafka_v1.services.managed_kafka import ManagedKafkaAsyncClient +from google.cloud.managedkafka_v1.services.managed_kafka import ManagedKafkaClient +from google.cloud.managedkafka_v1.services.managed_kafka import pagers +from google.cloud.managedkafka_v1.services.managed_kafka import transports +from google.cloud.managedkafka_v1.types import managed_kafka +from google.cloud.managedkafka_v1.types import resources +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint_template(client): + return "test.{UNIVERSE_DOMAIN}" if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) else client._DEFAULT_ENDPOINT_TEMPLATE + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ManagedKafkaClient._get_default_mtls_endpoint(None) is None + assert ManagedKafkaClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ManagedKafkaClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ManagedKafkaClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ManagedKafkaClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ManagedKafkaClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + +def test__read_environment_variables(): + assert ManagedKafkaClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert ManagedKafkaClient._read_environment_variables() == (True, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert ManagedKafkaClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError) as excinfo: + ManagedKafkaClient._read_environment_variables() + assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert ManagedKafkaClient._read_environment_variables() == (False, "never", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert ManagedKafkaClient._read_environment_variables() == (False, "always", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert ManagedKafkaClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + ManagedKafkaClient._read_environment_variables() + assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert ManagedKafkaClient._read_environment_variables() == (False, "auto", "foo.com") + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert ManagedKafkaClient._get_client_cert_source(None, False) is None + assert ManagedKafkaClient._get_client_cert_source(mock_provided_cert_source, False) is None + assert ManagedKafkaClient._get_client_cert_source(mock_provided_cert_source, True) == mock_provided_cert_source + + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_default_cert_source): + assert ManagedKafkaClient._get_client_cert_source(None, True) is mock_default_cert_source + assert 
ManagedKafkaClient._get_client_cert_source(mock_provided_cert_source, "true") is mock_provided_cert_source + +@mock.patch.object(ManagedKafkaClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(ManagedKafkaClient)) +@mock.patch.object(ManagedKafkaAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(ManagedKafkaAsyncClient)) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = ManagedKafkaClient._DEFAULT_UNIVERSE + default_endpoint = ManagedKafkaClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=default_universe) + mock_universe = "bar.com" + mock_endpoint = ManagedKafkaClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=mock_universe) + + assert ManagedKafkaClient._get_api_endpoint(api_override, mock_client_cert_source, default_universe, "always") == api_override + assert ManagedKafkaClient._get_api_endpoint(None, mock_client_cert_source, default_universe, "auto") == ManagedKafkaClient.DEFAULT_MTLS_ENDPOINT + assert ManagedKafkaClient._get_api_endpoint(None, None, default_universe, "auto") == default_endpoint + assert ManagedKafkaClient._get_api_endpoint(None, None, default_universe, "always") == ManagedKafkaClient.DEFAULT_MTLS_ENDPOINT + assert ManagedKafkaClient._get_api_endpoint(None, mock_client_cert_source, default_universe, "always") == ManagedKafkaClient.DEFAULT_MTLS_ENDPOINT + assert ManagedKafkaClient._get_api_endpoint(None, None, mock_universe, "never") == mock_endpoint + assert ManagedKafkaClient._get_api_endpoint(None, None, default_universe, "never") == default_endpoint + + with pytest.raises(MutualTLSChannelError) as excinfo: + ManagedKafkaClient._get_api_endpoint(None, mock_client_cert_source, mock_universe, "auto") + assert str(excinfo.value) == "mTLS is not supported in any universe other than googleapis.com." + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ManagedKafkaClient._get_universe_domain(client_universe_domain, universe_domain_env) == client_universe_domain + assert ManagedKafkaClient._get_universe_domain(None, universe_domain_env) == universe_domain_env + assert ManagedKafkaClient._get_universe_domain(None, None) == ManagedKafkaClient._DEFAULT_UNIVERSE + + with pytest.raises(ValueError) as excinfo: + ManagedKafkaClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ManagedKafkaClient, transports.ManagedKafkaGrpcTransport, "grpc"), + (ManagedKafkaClient, transports.ManagedKafkaRestTransport, "rest"), +]) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. 
Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + transport=transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [int(part) for part in google.auth.__version__.split(".")[0:2]] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class( + transport=transport_class(credentials=credentials) + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert str(excinfo.value) == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor = [int(part) for part in api_core_version.__version__.split(".")[0:2]] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class(client_options={"universe_domain": "bar.com"}, transport=transport_class(credentials=ga_credentials.AnonymousCredentials(),)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert str(excinfo.value) == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
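For orientation, the user-facing knob these mismatch checks guard is the `universe_domain` client option. A minimal sketch, assuming a recent google-auth whose credentials expose a universe domain; the domain value is a placeholder and AnonymousCredentials stand in for real ones:

```python
from google.api_core import client_options
from google.auth import credentials as ga_credentials
from google.cloud import managedkafka_v1

options = client_options.ClientOptions(universe_domain="example-universe.test")
client = managedkafka_v1.ManagedKafkaClient(
    client_options=options,
    credentials=ga_credentials.AnonymousCredentials(),  # stand-in credentials
)
# AnonymousCredentials default to the googleapis.com universe, so the
# configured domain does not match; the client surfaces that as the same
# ValueError asserted above before any RPC is sent.
```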
+ + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize("client_class,transport_name", [ + (ManagedKafkaClient, "grpc"), + (ManagedKafkaAsyncClient, "grpc_asyncio"), + (ManagedKafkaClient, "rest"), +]) +def test_managed_kafka_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'managedkafka.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://managedkafka.googleapis.com' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ManagedKafkaGrpcTransport, "grpc"), + (transports.ManagedKafkaGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.ManagedKafkaRestTransport, "rest"), +]) +def test_managed_kafka_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (ManagedKafkaClient, "grpc"), + (ManagedKafkaAsyncClient, "grpc_asyncio"), + (ManagedKafkaClient, "rest"), +]) +def test_managed_kafka_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'managedkafka.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://managedkafka.googleapis.com' + ) + + +def test_managed_kafka_client_get_transport_class(): + transport = ManagedKafkaClient.get_transport_class() + available_transports = [ + transports.ManagedKafkaGrpcTransport, + transports.ManagedKafkaRestTransport, + ] + assert transport in available_transports + + transport = ManagedKafkaClient.get_transport_class("grpc") + assert transport == transports.ManagedKafkaGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ManagedKafkaClient, transports.ManagedKafkaGrpcTransport, "grpc"), + (ManagedKafkaAsyncClient, transports.ManagedKafkaGrpcAsyncIOTransport, "grpc_asyncio"), + (ManagedKafkaClient, 
transports.ManagedKafkaRestTransport, "rest"), +]) +@mock.patch.object(ManagedKafkaClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(ManagedKafkaClient)) +@mock.patch.object(ManagedKafkaAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(ManagedKafkaAsyncClient)) +def test_managed_kafka_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ManagedKafkaClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ManagedKafkaClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
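One aside before the unsupported-value check resumes: the `api_endpoint` override exercised at the top of this test looks like this from user code. The host value is a placeholder and the credentials are stand-ins so the sketch stays offline:

```python
from google.api_core import client_options
from google.auth import credentials as ga_credentials
from google.cloud import managedkafka_v1

options = client_options.ClientOptions(api_endpoint="my-emulator.local:443")
client = managedkafka_v1.ManagedKafkaClient(
    client_options=options,
    credentials=ga_credentials.AnonymousCredentials(),
)
# The transport is bound to the override rather than the default
# managedkafka.googleapis.com endpoint; an explicit api_endpoint also wins
# over the mTLS auto-switching environment variables.
print(client.transport._host)  # my-emulator.local:443
```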
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions(api_audience="https://language.googleapis.com") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com" + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ManagedKafkaClient, transports.ManagedKafkaGrpcTransport, "grpc", "true"), + (ManagedKafkaAsyncClient, transports.ManagedKafkaGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (ManagedKafkaClient, transports.ManagedKafkaGrpcTransport, "grpc", "false"), + (ManagedKafkaAsyncClient, transports.ManagedKafkaGrpcAsyncIOTransport, "grpc_asyncio", "false"), + (ManagedKafkaClient, transports.ManagedKafkaRestTransport, "rest", "true"), + (ManagedKafkaClient, transports.ManagedKafkaRestTransport, "rest", "false"), +]) +@mock.patch.object(ManagedKafkaClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(ManagedKafkaClient)) +@mock.patch.object(ManagedKafkaAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(ManagedKafkaAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_managed_kafka_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ + ManagedKafkaClient, ManagedKafkaAsyncClient +]) +@mock.patch.object(ManagedKafkaClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ManagedKafkaClient)) +@mock.patch.object(ManagedKafkaAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ManagedKafkaAsyncClient)) +def test_managed_kafka_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + +@pytest.mark.parametrize("client_class", [ + ManagedKafkaClient, ManagedKafkaAsyncClient +]) +@mock.patch.object(ManagedKafkaClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(ManagedKafkaClient)) +@mock.patch.object(ManagedKafkaAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(ManagedKafkaAsyncClient)) +def test_managed_kafka_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = ManagedKafkaClient._DEFAULT_UNIVERSE + default_endpoint = ManagedKafkaClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=default_universe) + mock_universe = "bar.com" + mock_endpoint = ManagedKafkaClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=mock_universe) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"): + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=api_override) + client = client_class(client_options=options, credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. + options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class(client_options=options, credentials=ga_credentials.AnonymousCredentials()) + else: + client = client_class(client_options=options, credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == (mock_endpoint if universe_exists else default_endpoint) + assert client.universe_domain == (mock_universe if universe_exists else default_universe) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(client_options=options, credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ManagedKafkaClient, transports.ManagedKafkaGrpcTransport, "grpc"), + (ManagedKafkaAsyncClient, transports.ManagedKafkaGrpcAsyncIOTransport, "grpc_asyncio"), + (ManagedKafkaClient, transports.ManagedKafkaRestTransport, "rest"), +]) +def test_managed_kafka_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (ManagedKafkaClient, transports.ManagedKafkaGrpcTransport, "grpc", grpc_helpers), + (ManagedKafkaAsyncClient, transports.ManagedKafkaGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), + (ManagedKafkaClient, transports.ManagedKafkaRestTransport, "rest", None), +]) +def test_managed_kafka_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
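+    # The client is expected to pass credentials_file through to the transport
+    # unchanged (note credentials=None in the assertion below); loading the file
+    # is the transport's job, not the client's.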
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +def test_managed_kafka_client_client_options_from_dict(): + with mock.patch('google.cloud.managedkafka_v1.services.managed_kafka.transports.ManagedKafkaGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = ManagedKafkaClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (ManagedKafkaClient, transports.ManagedKafkaGrpcTransport, "grpc", grpc_helpers), + (ManagedKafkaAsyncClient, transports.ManagedKafkaGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_managed_kafka_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
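+    # Sketch of the mocked flow below: google.auth.load_credentials_from_file
+    # returns file_creds, which must be exactly the `credentials` that
+    # create_channel() receives; google.auth.default is mocked as well so ADC
+    # credentials cannot leak into the channel.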
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "managedkafka.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=None, + default_host="managedkafka.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.ListClustersRequest, + dict, +]) +def test_list_clusters(request_type, transport: str = 'grpc'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = managed_kafka.ListClustersResponse( + next_page_token='next_page_token_value', + unreachable=['unreachable_value'], + ) + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.ListClustersRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersPager) + assert response.next_page_token == 'next_page_token_value' + assert response.unreachable == ['unreachable_value'] + + +def test_list_clusters_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.list_clusters() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.ListClustersRequest() + + +def test_list_clusters_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
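+    # (Context on AIP-4235: UUID4-shaped fields such as a request_id are meant to
+    # be auto-filled by the client when left unset, so this test only pins the
+    # non-UUID4 string fields and lets the rest default.)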
+ request = managed_kafka.ListClustersRequest( + parent='parent_value', + page_token='page_token_value', + filter='filter_value', + order_by='order_by_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.list_clusters(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.ListClustersRequest( + parent='parent_value', + page_token='page_token_value', + filter='filter_value', + order_by='order_by_value', + ) + +def test_list_clusters_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_clusters in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc + request = {} + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_list_clusters_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. 
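+        # grpc_helpers_async.FakeUnaryUnaryCall wraps the response in an awaitable
+        # stand-in for a real unary-unary gRPC call, so `await` works under mock.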
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListClustersResponse(
+            next_page_token='next_page_token_value',
+            unreachable=['unreachable_value'],
+        ))
+        response = await client.list_clusters()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == managed_kafka.ListClustersRequest()
+
+@pytest.mark.asyncio
+async def test_list_clusters_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = ManagedKafkaAsyncClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._client._transport.list_clusters in client._client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[client._client._transport.list_clusters] = mock_rpc
+
+        request = {}
+        await client.list_clusters(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        await client.list_clusters(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+@pytest.mark.asyncio
+async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.ListClustersRequest):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_clusters),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListClustersResponse(
+            next_page_token='next_page_token_value',
+            unreachable=['unreachable_value'],
+        ))
+        response = await client.list_clusters(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.ListClustersRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListClustersAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+    assert response.unreachable == ['unreachable_value']
+
+
+@pytest.mark.asyncio
+async def test_list_clusters_async_from_dict():
+    await test_list_clusters_async(request_type=dict)
+
+
+def test_list_clusters_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.ListClustersRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_clusters),
+            '__call__') as call:
+        call.return_value = managed_kafka.ListClustersResponse()
+        client.list_clusters(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_clusters_field_headers_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.ListClustersRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_clusters),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListClustersResponse())
+        await client.list_clusters(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_list_clusters_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_clusters),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = managed_kafka.ListClustersResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_clusters(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+
+def test_list_clusters_flattened_error():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_clusters(
+            managed_kafka.ListClustersRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_clusters_flattened_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_clusters),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListClustersResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_clusters(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + managed_kafka.ListClustersRequest(), + parent='parent_value', + ) + + +def test_list_clusters_pager(transport_name: str = "grpc"): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + resources.Cluster(), + resources.Cluster(), + ], + next_page_token='abc', + ), + managed_kafka.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + ], + next_page_token='ghi', + ), + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + resources.Cluster(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_clusters(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resources.Cluster) + for i in results) +def test_list_clusters_pages(transport_name: str = "grpc"): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + resources.Cluster(), + resources.Cluster(), + ], + next_page_token='abc', + ), + managed_kafka.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + ], + next_page_token='ghi', + ), + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + resources.Cluster(), + ], + ), + RuntimeError, + ) + pages = list(client.list_clusters(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_clusters_async_pager(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
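+        # side_effect yields one response per underlying RPC; the trailing
+        # RuntimeError makes the test fail loudly if the pager ever requests more
+        # pages than were staged.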
+ call.side_effect = ( + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + resources.Cluster(), + resources.Cluster(), + ], + next_page_token='abc', + ), + managed_kafka.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + ], + next_page_token='ghi', + ), + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + resources.Cluster(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_clusters(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, resources.Cluster) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_clusters_async_pages(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + resources.Cluster(), + resources.Cluster(), + ], + next_page_token='abc', + ), + managed_kafka.ListClustersResponse( + clusters=[], + next_page_token='def', + ), + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + ], + next_page_token='ghi', + ), + managed_kafka.ListClustersResponse( + clusters=[ + resources.Cluster(), + resources.Cluster(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_clusters(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + managed_kafka.GetClusterRequest, + dict, +]) +def test_get_cluster(request_type, transport: str = 'grpc'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = resources.Cluster( + name='name_value', + state=resources.Cluster.State.CREATING, + ) + response = client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.GetClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, resources.Cluster) + assert response.name == 'name_value' + assert response.state == resources.Cluster.State.CREATING + + +def test_get_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.get_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.GetClusterRequest() + + +def test_get_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.GetClusterRequest( + name='name_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.get_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.GetClusterRequest( + name='name_value', + ) + +def test_get_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc + request = {} + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_get_cluster_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Cluster(
+            name='name_value',
+            state=resources.Cluster.State.CREATING,
+        ))
+        response = await client.get_cluster()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == managed_kafka.GetClusterRequest()
+
+@pytest.mark.asyncio
+async def test_get_cluster_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = ManagedKafkaAsyncClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._client._transport.get_cluster in client._client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[client._client._transport.get_cluster] = mock_rpc
+
+        request = {}
+        await client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        await client.get_cluster(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+@pytest.mark.asyncio
+async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.GetClusterRequest):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Cluster(
+            name='name_value',
+            state=resources.Cluster.State.CREATING,
+        ))
+        response = await client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.GetClusterRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, resources.Cluster)
+    assert response.name == 'name_value'
+    assert response.state == resources.Cluster.State.CREATING
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_async_from_dict():
+    await test_get_cluster_async(request_type=dict)
+
+
+def test_get_cluster_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.GetClusterRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        call.return_value = resources.Cluster()
+        client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_field_headers_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.GetClusterRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Cluster())
+        await client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_get_cluster_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = resources.Cluster()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_cluster(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_get_cluster_flattened_error():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_cluster(
+            managed_kafka.GetClusterRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Cluster())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_cluster(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_error_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
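+    # This mutual-exclusion check happens client-side, before any RPC is built,
+    # which is why no transport mock is needed in this test.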
+ with pytest.raises(ValueError): + await client.get_cluster( + managed_kafka.GetClusterRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.CreateClusterRequest, + dict, +]) +def test_create_cluster(request_type, transport: str = 'grpc'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.CreateClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.CreateClusterRequest() + + +def test_create_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.CreateClusterRequest( + parent='parent_value', + cluster_id='cluster_id_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. 
+ client.create_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.CreateClusterRequest( + parent='parent_value', + cluster_id='cluster_id_value', + ) + +def test_create_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc + request = {} + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_create_cluster_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.CreateClusterRequest() + +@pytest.mark.asyncio +async def test_create_cluster_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._client._transport.create_cluster in client._client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[client._client._transport.create_cluster] = mock_rpc + + request = {} + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.CreateClusterRequest): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = managed_kafka.CreateClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_cluster_async_from_dict(): + await test_create_cluster_async(request_type=dict) + + +def test_create_cluster_field_headers(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.CreateClusterRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_cluster_field_headers_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.CreateClusterRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
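+    # The routing header travels as an ('x-goog-request-params', 'parent=...')
+    # tuple inside the metadata kwarg recorded on the mocked stub call.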
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_create_cluster_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_cluster(
+            parent='parent_value',
+            cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))),
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].cluster
+        mock_val = resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')])))
+        assert arg == mock_val
+        arg = args[0].cluster_id
+        mock_val = 'cluster_id_value'
+        assert arg == mock_val
+
+
+def test_create_cluster_flattened_error():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_cluster(
+            managed_kafka.CreateClusterRequest(),
+            parent='parent_value',
+            cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))),
+            cluster_id='cluster_id_value',
+        )
+
+@pytest.mark.asyncio
+async def test_create_cluster_flattened_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_cluster(
+            parent='parent_value',
+            cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))),
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].cluster + mock_val = resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))) + assert arg == mock_val + arg = args[0].cluster_id + mock_val = 'cluster_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_cluster_flattened_error_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_cluster( + managed_kafka.CreateClusterRequest(), + parent='parent_value', + cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))), + cluster_id='cluster_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.UpdateClusterRequest, + dict, +]) +def test_update_cluster(request_type, transport: str = 'grpc'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.UpdateClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.UpdateClusterRequest() + + +def test_update_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.UpdateClusterRequest( + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
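+ # UpdateClusterRequest appears to have no plain string fields besides the
+ # auto-populated request_id, hence the empty constructor above.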
+ with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.update_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.UpdateClusterRequest( + ) + +def test_update_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc + request = {} + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_update_cluster_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
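+ # FakeUnaryUnaryCall wraps the response so the mocked stub returns an
+ # awaitable, mimicking a real grpc.aio unary-unary call object.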
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.UpdateClusterRequest() + +@pytest.mark.asyncio +async def test_update_cluster_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._client._transport.update_cluster in client._client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[client._client._transport.update_cluster] = mock_rpc + + request = {} + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.UpdateClusterRequest): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = managed_kafka.UpdateClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_cluster_async_from_dict(): + await test_update_cluster_async(request_type=dict) + + +def test_update_cluster_field_headers(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.UpdateClusterRequest() + + request.cluster.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'cluster.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_cluster_field_headers_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.UpdateClusterRequest() + + request.cluster.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'cluster.name=name_value', + ) in kw['metadata'] + + +def test_update_cluster_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_cluster( + cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].cluster + mock_val = resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))) + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_cluster_flattened_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
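+ # Supplying both would be ambiguous about which values win, so the
+ # generated client refuses the combination with a ValueError.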
+ with pytest.raises(ValueError):
+ client.update_cluster(
+ managed_kafka.UpdateClusterRequest(),
+ cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+@pytest.mark.asyncio
+async def test_update_cluster_flattened_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_cluster(
+ cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].cluster
+ mock_val = resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')])))
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_update_cluster_flattened_error_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_cluster(
+ managed_kafka.UpdateClusterRequest(),
+ cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.parametrize("request_type", [
+ managed_kafka.DeleteClusterRequest,
+ dict,
+])
+def test_delete_cluster(request_type, transport: str = 'grpc'):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.delete_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = managed_kafka.DeleteClusterRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
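+ # DeleteCluster is a long-running operation, so the client returns an
+ # operation future rather than the raw Operation proto.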
+ assert isinstance(response, future.Future) + + +def test_delete_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.DeleteClusterRequest() + + +def test_delete_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.DeleteClusterRequest( + name='name_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.delete_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.DeleteClusterRequest( + name='name_value', + ) + +def test_delete_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc + request = {} + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_delete_cluster_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.DeleteClusterRequest() + +@pytest.mark.asyncio +async def test_delete_cluster_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._client._transport.delete_cluster in client._client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[client._client._transport.delete_cluster] = mock_rpc + + request = {} + await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.DeleteClusterRequest): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = managed_kafka.DeleteClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_cluster_async_from_dict(): + await test_delete_cluster_async(request_type=dict) + + +def test_delete_cluster_field_headers(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.DeleteClusterRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
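+ # Patching the transport method's __call__ intercepts the request before
+ # it would reach the wire, so no network or real credentials are needed.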
+ with mock.patch.object(
+ type(client.transport.delete_cluster),
+ '__call__') as call:
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ client.delete_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name_value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_field_headers_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = managed_kafka.DeleteClusterRequest()
+
+ request.name = 'name_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_cluster),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+ await client.delete_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name_value',
+ ) in kw['metadata']
+
+
+def test_delete_cluster_flattened():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_cluster(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = 'name_value'
+ assert arg == mock_val
+
+
+def test_delete_cluster_flattened_error():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_cluster(
+ managed_kafka.DeleteClusterRequest(),
+ name='name_value',
+ )
+
+@pytest.mark.asyncio
+async def test_delete_cluster_flattened_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_cluster(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + managed_kafka.DeleteClusterRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.ListTopicsRequest, + dict, +]) +def test_list_topics(request_type, transport: str = 'grpc'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_topics), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = managed_kafka.ListTopicsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_topics(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.ListTopicsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTopicsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_topics_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_topics), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.list_topics() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.ListTopicsRequest() + + +def test_list_topics_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.ListTopicsRequest( + parent='parent_value', + page_token='page_token_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_topics), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. 
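+ # The echo assertion below checks that the caller-supplied fields are
+ # passed through unchanged while any auto-populated fields are filled in.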
+ client.list_topics(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.ListTopicsRequest( + parent='parent_value', + page_token='page_token_value', + ) + +def test_list_topics_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_topics in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.list_topics] = mock_rpc + request = {} + client.list_topics(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_topics(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_list_topics_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_topics), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListTopicsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_topics() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.ListTopicsRequest() + +@pytest.mark.asyncio +async def test_list_topics_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._client._transport.list_topics in client._client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[client._client._transport.list_topics] = mock_rpc + + request = {} + await client.list_topics(request) + + # Establish that the underlying gRPC stub method was called. 
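+ # One invocation so far; the second call below must reuse the cached
+ # wrapper rather than wrapping the method again.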
+ assert mock_rpc.call_count == 1
+
+ await client.list_topics(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+@pytest.mark.asyncio
+async def test_list_topics_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.ListTopicsRequest):
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_topics),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListTopicsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_topics(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = managed_kafka.ListTopicsRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTopicsAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_topics_async_from_dict():
+ await test_list_topics_async(request_type=dict)
+
+
+def test_list_topics_field_headers():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = managed_kafka.ListTopicsRequest()
+
+ request.parent = 'parent_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_topics),
+ '__call__') as call:
+ call.return_value = managed_kafka.ListTopicsResponse()
+ client.list_topics(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent_value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_topics_field_headers_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = managed_kafka.ListTopicsRequest()
+
+ request.parent = 'parent_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_topics),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListTopicsResponse())
+ await client.list_topics(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent_value',
+ ) in kw['metadata']
+
+
+def test_list_topics_flattened():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_topics),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = managed_kafka.ListTopicsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_topics(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = 'parent_value'
+ assert arg == mock_val
+
+
+def test_list_topics_flattened_error():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_topics(
+ managed_kafka.ListTopicsRequest(),
+ parent='parent_value',
+ )
+
+@pytest.mark.asyncio
+async def test_list_topics_flattened_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_topics),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListTopicsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_topics(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = 'parent_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_topics_flattened_error_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_topics(
+ managed_kafka.ListTopicsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_topics_pager(transport_name: str = "grpc"):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_topics),
+ '__call__') as call:
+ # Set the response to a series of pages.
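+ # Each side_effect entry below is one page; the trailing RuntimeError is
+ # a sentinel that fails the test if the pager fetches past the final
+ # page, whose empty next_page_token ends iteration.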
+ call.side_effect = ( + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + resources.Topic(), + ], + next_page_token='abc', + ), + managed_kafka.ListTopicsResponse( + topics=[], + next_page_token='def', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + ], + next_page_token='ghi', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_topics(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resources.Topic) + for i in results) +def test_list_topics_pages(transport_name: str = "grpc"): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_topics), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + resources.Topic(), + ], + next_page_token='abc', + ), + managed_kafka.ListTopicsResponse( + topics=[], + next_page_token='def', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + ], + next_page_token='ghi', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + ], + ), + RuntimeError, + ) + pages = list(client.list_topics(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_topics_async_pager(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_topics), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + resources.Topic(), + ], + next_page_token='abc', + ), + managed_kafka.ListTopicsResponse( + topics=[], + next_page_token='def', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + ], + next_page_token='ghi', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_topics(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, resources.Topic) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_topics_async_pages(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_topics), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
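+ # Same four pages as the sync pager test: tokens 'abc', 'def' and 'ghi',
+ # then a token-less page that marks the end of the result set.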
+ call.side_effect = ( + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + resources.Topic(), + ], + next_page_token='abc', + ), + managed_kafka.ListTopicsResponse( + topics=[], + next_page_token='def', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + ], + next_page_token='ghi', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_topics(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + managed_kafka.GetTopicRequest, + dict, +]) +def test_get_topic(request_type, transport: str = 'grpc'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_topic), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = resources.Topic( + name='name_value', + partition_count=1634, + replication_factor=1912, + ) + response = client.get_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.GetTopicRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, resources.Topic) + assert response.name == 'name_value' + assert response.partition_count == 1634 + assert response.replication_factor == 1912 + + +def test_get_topic_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_topic), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.get_topic() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.GetTopicRequest() + + +def test_get_topic_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.GetTopicRequest( + name='name_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_topic), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.get_topic(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.GetTopicRequest( + name='name_value', + ) + +def test_get_topic_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_topic in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.get_topic] = mock_rpc + request = {} + client.get_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_topic(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_get_topic_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_topic), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic( + name='name_value', + partition_count=1634, + replication_factor=1912, + )) + response = await client.get_topic() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.GetTopicRequest() + +@pytest.mark.asyncio +async def test_get_topic_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._client._transport.get_topic in client._client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[client._client._transport.get_topic] = mock_rpc + + request = {} + await client.get_topic(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1
+
+ await client.get_topic(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+@pytest.mark.asyncio
+async def test_get_topic_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.GetTopicRequest):
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_topic),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic(
+ name='name_value',
+ partition_count=1634,
+ replication_factor=1912,
+ ))
+ response = await client.get_topic(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = managed_kafka.GetTopicRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, resources.Topic)
+ assert response.name == 'name_value'
+ assert response.partition_count == 1634
+ assert response.replication_factor == 1912
+
+
+@pytest.mark.asyncio
+async def test_get_topic_async_from_dict():
+ await test_get_topic_async(request_type=dict)
+
+
+def test_get_topic_field_headers():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = managed_kafka.GetTopicRequest()
+
+ request.name = 'name_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_topic),
+ '__call__') as call:
+ call.return_value = resources.Topic()
+ client.get_topic(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name_value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_topic_field_headers_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = managed_kafka.GetTopicRequest()
+
+ request.name = 'name_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_topic),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic())
+ await client.get_topic(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name_value',
+ ) in kw['metadata']
+
+
+def test_get_topic_flattened():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_topic),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = resources.Topic()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_topic(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = 'name_value'
+ assert arg == mock_val
+
+
+def test_get_topic_flattened_error():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_topic(
+ managed_kafka.GetTopicRequest(),
+ name='name_value',
+ )
+
+@pytest.mark.asyncio
+async def test_get_topic_flattened_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_topic),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_topic(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = 'name_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_topic_flattened_error_async():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_topic(
+ managed_kafka.GetTopicRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.parametrize("request_type", [
+ managed_kafka.CreateTopicRequest,
+ dict,
+])
+def test_create_topic(request_type, transport: str = 'grpc'):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_topic),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = resources.Topic(
+ name='name_value',
+ partition_count=1634,
+ replication_factor=1912,
+ )
+ response = client.create_topic(request)
+
+ # Establish that the underlying gRPC stub method was called.
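+ # A unary RPC should hit the stub exactly once per client call.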
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.CreateTopicRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, resources.Topic) + assert response.name == 'name_value' + assert response.partition_count == 1634 + assert response.replication_factor == 1912 + + +def test_create_topic_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_topic), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.create_topic() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.CreateTopicRequest() + + +def test_create_topic_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.CreateTopicRequest( + parent='parent_value', + topic_id='topic_id_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_topic), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.create_topic(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.CreateTopicRequest( + parent='parent_value', + topic_id='topic_id_value', + ) + +def test_create_topic_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_topic in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.create_topic] = mock_rpc + request = {} + client.create_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_topic(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_create_topic_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
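+ # Presumably the async surface should behave like the sync one here: an
+ # argument-less call still sends a well-formed, empty CreateTopicRequest.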
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_topic),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic(
+            name='name_value',
+            partition_count=1634,
+            replication_factor=1912,
+        ))
+        response = await client.create_topic()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == managed_kafka.CreateTopicRequest()
+
+@pytest.mark.asyncio
+async def test_create_topic_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = ManagedKafkaAsyncClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._client._transport.create_topic in client._client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[client._client._transport.create_topic] = mock_rpc
+
+        request = {}
+        await client.create_topic(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        await client.create_topic(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+@pytest.mark.asyncio
+async def test_create_topic_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.CreateTopicRequest):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_topic),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic(
+            name='name_value',
+            partition_count=1634,
+            replication_factor=1912,
+        ))
+        response = await client.create_topic(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.CreateTopicRequest()
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, resources.Topic)
+        assert response.name == 'name_value'
+        assert response.partition_count == 1634
+        assert response.replication_factor == 1912
+
+
+@pytest.mark.asyncio
+async def test_create_topic_async_from_dict():
+    await test_create_topic_async(request_type=dict)
+
+
+def test_create_topic_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
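+    # The client mirrors URI/resource fields into the 'x-goog-request-params'
+    # request metadata so the backend can route the call; the assertions
+    # below check that this header is present.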
+ request = managed_kafka.CreateTopicRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_topic), + '__call__') as call: + call.return_value = resources.Topic() + client.create_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_topic_field_headers_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.CreateTopicRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_topic), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic()) + await client.create_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_topic_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_topic), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = resources.Topic() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_topic( + parent='parent_value', + topic=resources.Topic(name='name_value'), + topic_id='topic_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].topic + mock_val = resources.Topic(name='name_value') + assert arg == mock_val + arg = args[0].topic_id + mock_val = 'topic_id_value' + assert arg == mock_val + + +def test_create_topic_flattened_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_topic( + managed_kafka.CreateTopicRequest(), + parent='parent_value', + topic=resources.Topic(name='name_value'), + topic_id='topic_id_value', + ) + +@pytest.mark.asyncio +async def test_create_topic_flattened_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_topic), + '__call__') as call: + # Designate an appropriate return value for the call. 
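+        # For the async client the designated value must be awaitable, hence
+        # the FakeUnaryUnaryCall wrapper on the next line.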
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_topic(
+            parent='parent_value',
+            topic=resources.Topic(name='name_value'),
+            topic_id='topic_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].topic
+        mock_val = resources.Topic(name='name_value')
+        assert arg == mock_val
+        arg = args[0].topic_id
+        mock_val = 'topic_id_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_create_topic_flattened_error_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_topic(
+            managed_kafka.CreateTopicRequest(),
+            parent='parent_value',
+            topic=resources.Topic(name='name_value'),
+            topic_id='topic_id_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  managed_kafka.UpdateTopicRequest,
+  dict,
+])
+def test_update_topic(request_type, transport: str = 'grpc'):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_topic),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = resources.Topic(
+            name='name_value',
+            partition_count=1634,
+            replication_factor=1912,
+        )
+        response = client.update_topic(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.UpdateTopicRequest()
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, resources.Topic)
+        assert response.name == 'name_value'
+        assert response.partition_count == 1634
+        assert response.replication_factor == 1912
+
+
+def test_update_topic_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_topic),
+            '__call__') as call:
+        call.return_value.name = "foo"  # operation_request.operation in compute client(s) expect a string.
+        client.update_topic()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == managed_kafka.UpdateTopicRequest()
+
+
+def test_update_topic_non_empty_request_with_auto_populated_field():
+    # This test is a coverage failsafe to make sure that UUID4 fields are
+    # automatically populated, according to AIP-4235, with non-empty requests.
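+    # (UpdateTopicRequest appears to carry no top-level string fields, so
+    # nothing is populated below; the call path is still exercised.)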
+ client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.UpdateTopicRequest( + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_topic), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.update_topic(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.UpdateTopicRequest( + ) + +def test_update_topic_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_topic in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.update_topic] = mock_rpc + request = {} + client.update_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_topic(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_update_topic_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_topic), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic(
+            name='name_value',
+            partition_count=1634,
+            replication_factor=1912,
+        ))
+        response = await client.update_topic()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == managed_kafka.UpdateTopicRequest()
+
+@pytest.mark.asyncio
+async def test_update_topic_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = ManagedKafkaAsyncClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._client._transport.update_topic in client._client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[client._client._transport.update_topic] = mock_rpc
+
+        request = {}
+        await client.update_topic(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        await client.update_topic(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+@pytest.mark.asyncio
+async def test_update_topic_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.UpdateTopicRequest):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_topic),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic(
+            name='name_value',
+            partition_count=1634,
+            replication_factor=1912,
+        ))
+        response = await client.update_topic(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.UpdateTopicRequest()
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, resources.Topic)
+        assert response.name == 'name_value'
+        assert response.partition_count == 1634
+        assert response.replication_factor == 1912
+
+
+@pytest.mark.asyncio
+async def test_update_topic_async_from_dict():
+    await test_update_topic_async(request_type=dict)
+
+
+def test_update_topic_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.UpdateTopicRequest()
+
+    request.topic.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_topic),
+            '__call__') as call:
+        call.return_value = resources.Topic()
+        client.update_topic(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert (
+            'x-goog-request-params',
+            'topic.name=name_value',
+        ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_topic_field_headers_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.UpdateTopicRequest()
+
+    request.topic.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_topic),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic())
+        await client.update_topic(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert (
+            'x-goog-request-params',
+            'topic.name=name_value',
+        ) in kw['metadata']
+
+
+def test_update_topic_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_topic),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = resources.Topic()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_topic(
+            topic=resources.Topic(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].topic
+        mock_val = resources.Topic(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+
+def test_update_topic_flattened_error():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_topic(
+            managed_kafka.UpdateTopicRequest(),
+            topic=resources.Topic(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+@pytest.mark.asyncio
+async def test_update_topic_flattened_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_topic),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Topic())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
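+        # The flattened kwargs are folded into a single UpdateTopicRequest by
+        # the client before the request reaches the transport stub.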
+ response = await client.update_topic( + topic=resources.Topic(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].topic + mock_val = resources.Topic(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_topic_flattened_error_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_topic( + managed_kafka.UpdateTopicRequest(), + topic=resources.Topic(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.DeleteTopicRequest, + dict, +]) +def test_delete_topic(request_type, transport: str = 'grpc'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_topic), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.DeleteTopicRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_topic_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_topic), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.delete_topic() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.DeleteTopicRequest() + + +def test_delete_topic_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.DeleteTopicRequest( + name='name_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_topic), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. 
+ client.delete_topic(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.DeleteTopicRequest( + name='name_value', + ) + +def test_delete_topic_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_topic in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.delete_topic] = mock_rpc + request = {} + client.delete_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_topic(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_delete_topic_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_topic), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_topic() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.DeleteTopicRequest() + +@pytest.mark.asyncio +async def test_delete_topic_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._client._transport.delete_topic in client._client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[client._client._transport.delete_topic] = mock_rpc + + request = {} + await client.delete_topic(request) + + # Establish that the underlying gRPC stub method was called. 
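+        # wrapper_fn stays at zero below because the transport reuses the
+        # cached wrapped method rather than re-wrapping it on each call.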
+ assert mock_rpc.call_count == 1 + + await client.delete_topic(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_delete_topic_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.DeleteTopicRequest): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_topic), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = managed_kafka.DeleteTopicRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_topic_async_from_dict(): + await test_delete_topic_async(request_type=dict) + + +def test_delete_topic_field_headers(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.DeleteTopicRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_topic), + '__call__') as call: + call.return_value = None + client.delete_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_topic_field_headers_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.DeleteTopicRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_topic), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_topic_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_topic), + '__call__') as call: + # Designate an appropriate return value for the call. 
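+        # DeleteTopic has no response payload, which the client surfaces as
+        # None, so None is the designated return value here.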
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_topic(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_delete_topic_flattened_error():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_topic(
+            managed_kafka.DeleteTopicRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_delete_topic_flattened_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_topic),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_topic(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_delete_topic_flattened_error_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_topic(
+            managed_kafka.DeleteTopicRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  managed_kafka.ListConsumerGroupsRequest,
+  dict,
+])
+def test_list_consumer_groups(request_type, transport: str = 'grpc'):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_consumer_groups),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = managed_kafka.ListConsumerGroupsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_consumer_groups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.ListConsumerGroupsRequest()
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, pagers.ListConsumerGroupsPager)
+        assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_consumer_groups_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+ client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_consumer_groups), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.list_consumer_groups() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.ListConsumerGroupsRequest() + + +def test_list_consumer_groups_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.ListConsumerGroupsRequest( + parent='parent_value', + page_token='page_token_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_consumer_groups), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.list_consumer_groups(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.ListConsumerGroupsRequest( + parent='parent_value', + page_token='page_token_value', + ) + +def test_list_consumer_groups_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_consumer_groups in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.list_consumer_groups] = mock_rpc + request = {} + client.list_consumer_groups(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_consumer_groups(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_list_consumer_groups_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_consumer_groups), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListConsumerGroupsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_consumer_groups()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == managed_kafka.ListConsumerGroupsRequest()
+
+@pytest.mark.asyncio
+async def test_list_consumer_groups_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = ManagedKafkaAsyncClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._client._transport.list_consumer_groups in client._client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[client._client._transport.list_consumer_groups] = mock_rpc
+
+        request = {}
+        await client.list_consumer_groups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        await client.list_consumer_groups(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+@pytest.mark.asyncio
+async def test_list_consumer_groups_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.ListConsumerGroupsRequest):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_consumer_groups),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListConsumerGroupsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_consumer_groups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.ListConsumerGroupsRequest()
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, pagers.ListConsumerGroupsAsyncPager)
+        assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_consumer_groups_async_from_dict():
+    await test_list_consumer_groups_async(request_type=dict)
+
+
+def test_list_consumer_groups_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.ListConsumerGroupsRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_consumer_groups),
+            '__call__') as call:
+        call.return_value = managed_kafka.ListConsumerGroupsResponse()
+        client.list_consumer_groups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert (
+            'x-goog-request-params',
+            'parent=parent_value',
+        ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_consumer_groups_field_headers_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.ListConsumerGroupsRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_consumer_groups),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListConsumerGroupsResponse())
+        await client.list_consumer_groups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert (
+            'x-goog-request-params',
+            'parent=parent_value',
+        ) in kw['metadata']
+
+
+def test_list_consumer_groups_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_consumer_groups),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = managed_kafka.ListConsumerGroupsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_consumer_groups(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+
+def test_list_consumer_groups_flattened_error():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_consumer_groups(
+            managed_kafka.ListConsumerGroupsRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_consumer_groups_flattened_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_consumer_groups),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(managed_kafka.ListConsumerGroupsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_consumer_groups(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_consumer_groups_flattened_error_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_consumer_groups(
+            managed_kafka.ListConsumerGroupsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_consumer_groups_pager(transport_name: str = "grpc"):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_consumer_groups),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            managed_kafka.ListConsumerGroupsResponse(
+                consumer_groups=[
+                    resources.ConsumerGroup(),
+                    resources.ConsumerGroup(),
+                    resources.ConsumerGroup(),
+                ],
+                next_page_token='abc',
+            ),
+            managed_kafka.ListConsumerGroupsResponse(
+                consumer_groups=[],
+                next_page_token='def',
+            ),
+            managed_kafka.ListConsumerGroupsResponse(
+                consumer_groups=[
+                    resources.ConsumerGroup(),
+                ],
+                next_page_token='ghi',
+            ),
+            managed_kafka.ListConsumerGroupsResponse(
+                consumer_groups=[
+                    resources.ConsumerGroup(),
+                    resources.ConsumerGroup(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        expected_metadata = ()
+        retry = retries.Retry()
+        timeout = 5
+        expected_metadata = tuple(expected_metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_consumer_groups(request={}, retry=retry, timeout=timeout)
+
+        assert pager._metadata == expected_metadata
+        assert pager._retry == retry
+        assert pager._timeout == timeout
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, resources.ConsumerGroup)
+                   for i in results)
+
+
+def test_list_consumer_groups_pages(transport_name: str = "grpc"):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_consumer_groups),
+            '__call__') as call:
+        # Set the response to a series of pages.
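+        # Each element of side_effect is consumed by one successive stub
+        # call; the trailing RuntimeError guards against the pager fetching
+        # more pages than were prepared.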
+ call.side_effect = ( + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + resources.ConsumerGroup(), + resources.ConsumerGroup(), + ], + next_page_token='abc', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[], + next_page_token='def', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + ], + next_page_token='ghi', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + resources.ConsumerGroup(), + ], + ), + RuntimeError, + ) + pages = list(client.list_consumer_groups(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_consumer_groups_async_pager(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_consumer_groups), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + resources.ConsumerGroup(), + resources.ConsumerGroup(), + ], + next_page_token='abc', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[], + next_page_token='def', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + ], + next_page_token='ghi', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + resources.ConsumerGroup(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_consumer_groups(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, resources.ConsumerGroup) + for i in responses) + + +@pytest.mark.asyncio +async def test_list_consumer_groups_async_pages(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_consumer_groups), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + resources.ConsumerGroup(), + resources.ConsumerGroup(), + ], + next_page_token='abc', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[], + next_page_token='def', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + ], + next_page_token='ghi', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + resources.ConsumerGroup(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_consumer_groups(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + managed_kafka.GetConsumerGroupRequest, + dict, +]) +def test_get_consumer_group(request_type, transport: str = 'grpc'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_consumer_group), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = resources.ConsumerGroup( + name='name_value', + ) + response = client.get_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.GetConsumerGroupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, resources.ConsumerGroup) + assert response.name == 'name_value' + + +def test_get_consumer_group_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_consumer_group), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.get_consumer_group() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.GetConsumerGroupRequest() + + +def test_get_consumer_group_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.GetConsumerGroupRequest( + name='name_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_consumer_group), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.get_consumer_group(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.GetConsumerGroupRequest( + name='name_value', + ) + +def test_get_consumer_group_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_consumer_group in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.get_consumer_group] = mock_rpc + request = {} + client.get_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_consumer_group(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_get_consumer_group_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_consumer_group), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.ConsumerGroup( + name='name_value', + )) + response = await client.get_consumer_group() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.GetConsumerGroupRequest() + +@pytest.mark.asyncio +async def test_get_consumer_group_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._client._transport.get_consumer_group in client._client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[client._client._transport.get_consumer_group] = mock_rpc + + request = {} + await client.get_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. 
+        assert mock_rpc.call_count == 1
+
+        await client.get_consumer_group(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+@pytest.mark.asyncio
+async def test_get_consumer_group_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.GetConsumerGroupRequest):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.ConsumerGroup(
+            name='name_value',
+        ))
+        response = await client.get_consumer_group(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.GetConsumerGroupRequest()
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, resources.ConsumerGroup)
+        assert response.name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_consumer_group_async_from_dict():
+    await test_get_consumer_group_async(request_type=dict)
+
+
+def test_get_consumer_group_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.GetConsumerGroupRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_consumer_group),
+            '__call__') as call:
+        call.return_value = resources.ConsumerGroup()
+        client.get_consumer_group(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert (
+            'x-goog-request-params',
+            'name=name_value',
+        ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_consumer_group_field_headers_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.GetConsumerGroupRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_consumer_group),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.ConsumerGroup())
+        await client.get_consumer_group(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
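+        # kw['metadata'] is the sequence of (key, value) metadata tuples the
+        # stub received; the routing header must appear among them.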
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_get_consumer_group_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = resources.ConsumerGroup()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_consumer_group(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_get_consumer_group_flattened_error():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_consumer_group(
+            managed_kafka.GetConsumerGroupRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_consumer_group_flattened_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.ConsumerGroup())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_consumer_group(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_consumer_group_flattened_error_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_consumer_group(
+            managed_kafka.GetConsumerGroupRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+    managed_kafka.UpdateConsumerGroupRequest,
+    dict,
+])
+def test_update_consumer_group(request_type, transport: str = 'grpc'):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = resources.ConsumerGroup(
+            name='name_value',
+        )
+        response = client.update_consumer_group(request)
+
+        # Establish that the underlying gRPC stub method was called.
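+        # mock.Mock records every stub invocation in call.mock_calls as a
+        # (name, args, kwargs) triple; args[0] below is the request proto the
+        # client actually handed to the stub.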
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = managed_kafka.UpdateConsumerGroupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, resources.ConsumerGroup) + assert response.name == 'name_value' + + +def test_update_consumer_group_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_consumer_group), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.update_consumer_group() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.UpdateConsumerGroupRequest() + + +def test_update_consumer_group_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.UpdateConsumerGroupRequest( + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_consumer_group), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.update_consumer_group(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.UpdateConsumerGroupRequest( + ) + +def test_update_consumer_group_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_consumer_group in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.update_consumer_group] = mock_rpc + request = {} + client.update_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_consumer_group(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_update_consumer_group_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.ConsumerGroup(
+            name='name_value',
+        ))
+        response = await client.update_consumer_group()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == managed_kafka.UpdateConsumerGroupRequest()
+
+@pytest.mark.asyncio
+async def test_update_consumer_group_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"):
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+        client = ManagedKafkaAsyncClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._client._transport.update_consumer_group in client._client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.AsyncMock()
+        mock_rpc.return_value = mock.Mock()
+        client._client._transport._wrapped_methods[client._client._transport.update_consumer_group] = mock_rpc
+
+        request = {}
+        await client.update_consumer_group(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        await client.update_consumer_group(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+@pytest.mark.asyncio
+async def test_update_consumer_group_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.UpdateConsumerGroupRequest):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.ConsumerGroup(
+            name='name_value',
+        ))
+        response = await client.update_consumer_group(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.UpdateConsumerGroupRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, resources.ConsumerGroup)
+    assert response.name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_update_consumer_group_async_from_dict():
+    await test_update_consumer_group_async(request_type=dict)
+
+
+def test_update_consumer_group_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = managed_kafka.UpdateConsumerGroupRequest() + + request.consumer_group.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_consumer_group), + '__call__') as call: + call.return_value = resources.ConsumerGroup() + client.update_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'consumer_group.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_consumer_group_field_headers_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.UpdateConsumerGroupRequest() + + request.consumer_group.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_consumer_group), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.ConsumerGroup()) + await client.update_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'consumer_group.name=name_value', + ) in kw['metadata'] + + +def test_update_consumer_group_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_consumer_group), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = resources.ConsumerGroup() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_consumer_group( + consumer_group=resources.ConsumerGroup(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].consumer_group + mock_val = resources.ConsumerGroup(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_update_consumer_group_flattened_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_consumer_group( + managed_kafka.UpdateConsumerGroupRequest(), + consumer_group=resources.ConsumerGroup(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + +@pytest.mark.asyncio +async def test_update_consumer_group_flattened_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
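+    # For Update* RPCs the flattened surface is the (resource, update_mask)
+    # pair; a sketch of the call shape exercised below, mirroring the sync
+    # variant above:
+    #   await client.update_consumer_group(
+    #       consumer_group=resources.ConsumerGroup(name='name_value'),
+    #       update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))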
+    with mock.patch.object(
+            type(client.transport.update_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.ConsumerGroup())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_consumer_group(
+            consumer_group=resources.ConsumerGroup(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].consumer_group
+        mock_val = resources.ConsumerGroup(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_update_consumer_group_flattened_error_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_consumer_group(
+            managed_kafka.UpdateConsumerGroupRequest(),
+            consumer_group=resources.ConsumerGroup(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+    managed_kafka.DeleteConsumerGroupRequest,
+    dict,
+])
+def test_delete_consumer_group(request_type, transport: str = 'grpc'):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.delete_consumer_group(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        request = managed_kafka.DeleteConsumerGroupRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_delete_consumer_group_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_consumer_group),
+            '__call__') as call:
+        call.return_value.name = "foo"  # operation_request.operation in compute client(s) expect a string.
+        client.delete_consumer_group()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == managed_kafka.DeleteConsumerGroupRequest()
+
+
+def test_delete_consumer_group_non_empty_request_with_auto_populated_field():
+    # This test is a coverage failsafe to make sure that UUID4 fields are
+    # automatically populated, according to AIP-4235, with non-empty requests.
+ client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = managed_kafka.DeleteConsumerGroupRequest( + name='name_value', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_consumer_group), + '__call__') as call: + call.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client.delete_consumer_group(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.DeleteConsumerGroupRequest( + name='name_value', + ) + +def test_delete_consumer_group_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_consumer_group in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.delete_consumer_group] = mock_rpc + request = {} + client.delete_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_consumer_group(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_delete_consumer_group_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_consumer_group), + '__call__') as call: + # Designate an appropriate return value for the call. 
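+        # FakeUnaryUnaryCall (from google.api_core's grpc_helpers_async) wraps
+        # a plain value in an awaitable call object, so the async client can
+        # await the mocked stub exactly like a real unary-unary RPC; Delete
+        # returns Empty, hence the bare None payload below.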
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_consumer_group() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == managed_kafka.DeleteConsumerGroupRequest() + +@pytest.mark.asyncio +async def test_delete_consumer_group_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._client._transport.delete_consumer_group in client._client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[client._client._transport.delete_consumer_group] = mock_rpc + + request = {} + await client.delete_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_consumer_group(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + +@pytest.mark.asyncio +async def test_delete_consumer_group_async(transport: str = 'grpc_asyncio', request_type=managed_kafka.DeleteConsumerGroupRequest): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_consumer_group), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = managed_kafka.DeleteConsumerGroupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_consumer_group_async_from_dict(): + await test_delete_consumer_group_async(request_type=dict) + + +def test_delete_consumer_group_field_headers(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = managed_kafka.DeleteConsumerGroupRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_consumer_group), + '__call__') as call: + call.return_value = None + client.delete_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
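+    # Metadata on the recorded call is a sequence of (key, value) 2-tuples;
+    # the 'name=name_value' routing value is derived by the generated client
+    # from the request's resource name field.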
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_consumer_group_field_headers_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = managed_kafka.DeleteConsumerGroupRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_consumer_group),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.delete_consumer_group(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_delete_consumer_group_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_consumer_group(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_delete_consumer_group_flattened_error():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_consumer_group(
+            managed_kafka.DeleteConsumerGroupRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_delete_consumer_group_flattened_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_consumer_group),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_consumer_group(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_delete_consumer_group_flattened_error_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.delete_consumer_group( + managed_kafka.DeleteConsumerGroupRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.ListClustersRequest, + dict, +]) +def test_list_clusters_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = managed_kafka.ListClustersResponse( + next_page_token='next_page_token_value', + unreachable=['unreachable_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = managed_kafka.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_clusters(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersPager) + assert response.next_page_token == 'next_page_token_value' + assert response.unreachable == ['unreachable_value'] + +def test_list_clusters_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_clusters in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc + + request = {} + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_clusters_rest_required_fields(request_type=managed_kafka.ListClustersRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_clusters._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_clusters._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("filter", "order_by", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = managed_kafka.ListClustersResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
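+            # transcode() normally matches the request against the method's
+            # http rule and splits it into uri / path / query pieces; faking
+            # it with a bare uri routes every remaining field into
+            # query_params, which is what this required-fields check needs
+            # to observe.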
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = managed_kafka.ListClustersResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.list_clusters(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_list_clusters_rest_unset_required_fields():
+    transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.list_clusters._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("filter", "orderBy", "pageSize", "pageToken", )) & set(("parent", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_clusters_rest_interceptors(null_interceptor):
+    transport = transports.ManagedKafkaRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+        )
+    client = ManagedKafkaClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_list_clusters") as post, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_list_clusters") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = managed_kafka.ListClustersRequest.pb(managed_kafka.ListClustersRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = managed_kafka.ListClustersResponse.to_json(managed_kafka.ListClustersResponse())
+
+        request = managed_kafka.ListClustersRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = managed_kafka.ListClustersResponse()
+
+        client.list_clusters(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_list_clusters_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.ListClustersRequest):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/locations/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.list_clusters(request)
+
+
+def test_list_clusters_rest_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = managed_kafka.ListClustersResponse()
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {'parent': 'projects/sample1/locations/sample2'}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            parent='parent_value',
+        )
+        mock_args.update(sample_request)
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        # Convert return value to protobuf type
+        return_value = managed_kafka.ListClustersResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        client.list_clusters(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/clusters" % client.transport._host, args[1])
+
+
+def test_list_clusters_rest_flattened_error(transport: str = 'rest'):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_clusters(
+            managed_kafka.ListClustersRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_clusters_rest_pager(transport: str = 'rest'):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Set the response as a series of pages
+        response = (
+            managed_kafka.ListClustersResponse(
+                clusters=[
+                    resources.Cluster(),
+                    resources.Cluster(),
+                    resources.Cluster(),
+                ],
+                next_page_token='abc',
+            ),
+            managed_kafka.ListClustersResponse(
+                clusters=[],
+                next_page_token='def',
+            ),
+            managed_kafka.ListClustersResponse(
+                clusters=[
+                    resources.Cluster(),
+                ],
+                next_page_token='ghi',
+            ),
+            managed_kafka.ListClustersResponse(
+                clusters=[
+                    resources.Cluster(),
+                    resources.Cluster(),
+                ],
+            ),
+        )
+        # Two responses for two calls
+        response = response + response
+
+        # Wrap the values into proper Response objs
+        response = tuple(managed_kafka.ListClustersResponse.to_json(x) for x in response)
+        return_values = tuple(Response() for _ in response)
+        for return_val, response_val in zip(return_values, response):
+            return_val._content = response_val.encode('UTF-8')
+            return_val.status_code = 200
+        req.side_effect = return_values
+
+        sample_request = {'parent': 'projects/sample1/locations/sample2'}
+
+        pager = client.list_clusters(request=sample_request)
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, resources.Cluster)
+                   for i in results)
+
+        pages = list(client.list_clusters(request=sample_request).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize("request_type", [
+    managed_kafka.GetClusterRequest,
+    dict,
+])
+def test_get_cluster_rest(request_type):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = resources.Cluster(
+            name='name_value',
+            state=resources.Cluster.State.CREATING,
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        # Convert return value to protobuf type
+        return_value = resources.Cluster.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.get_cluster(request)
+
+    # Establish that the response is the type that we expect.
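+    # The REST transport decodes the JSON payload back into the proto-plus
+    # type, so the checks below compare typed fields (name, state) rather
+    # than raw JSON.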
+ assert isinstance(response, resources.Cluster) + assert response.name == 'name_value' + assert response.state == resources.Cluster.State.CREATING + +def test_get_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc + + request = {} + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_cluster_rest_required_fields(request_type=managed_kafka.GetClusterRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resources.Cluster() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = resources.Cluster.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.get_cluster(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_get_cluster_rest_unset_required_fields():
+    transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.get_cluster._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_cluster_rest_interceptors(null_interceptor):
+    transport = transports.ManagedKafkaRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+        )
+    client = ManagedKafkaClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_get_cluster") as post, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_get_cluster") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = managed_kafka.GetClusterRequest.pb(managed_kafka.GetClusterRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = resources.Cluster.to_json(resources.Cluster())
+
+        request = managed_kafka.GetClusterRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = resources.Cluster()
+
+        client.get_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_get_cluster_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.GetClusterRequest):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get_cluster(request)
+
+
+def test_get_cluster_rest_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.Cluster() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/clusters/*}" % client.transport._host, args[1]) + + +def test_get_cluster_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + managed_kafka.GetClusterRequest(), + name='name_value', + ) + + +def test_get_cluster_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.CreateClusterRequest, + dict, +]) +def test_create_cluster_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2'} + request_init["cluster"] = {'gcp_config': {'access_config': {'network_configs': [{'subnet': 'subnet_value'}]}, 'kms_key': 'kms_key_value'}, 'name': 'name_value', 'create_time': {'seconds': 751, 'nanos': 543}, 'update_time': {}, 'labels': {}, 'capacity_config': {'vcpu_count': 1094, 'memory_bytes': 1311}, 'rebalance_config': {'mode': 1}, 'state': 1} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = managed_kafka.CreateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
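+        # A proto-plus message class exposes its schema via `.meta.fields`,
+        # while a raw protobuf class carries a DESCRIPTOR attribute; the
+        # branch below keys off the absence of DESCRIPTOR to tell them apart.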
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_cluster(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + +def test_create_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. 
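+        # create_cluster is a long-running operation: the first call also has
+        # to build and cache an operations wrapper, which is why this test
+        # resets wrapper_fn once more before the second invocation below.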
+ client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc + + request = {} + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_cluster_rest_required_fields(request_type=managed_kafka.CreateClusterRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["cluster_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + assert "clusterId" not in jsonified_request + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == request_init["cluster_id"] + + jsonified_request["parent"] = 'parent_value' + jsonified_request["clusterId"] = 'cluster_id_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("cluster_id", "request_id", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == 'cluster_id_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
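+            # With the body forced through query_params, the required
+            # cluster_id (serialized in camelCase as "clusterId") must appear
+            # there too; the expected_params assertion below checks exactly
+            # that, alongside the standard $alt parameter.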
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.create_cluster(request)
+
+            expected_params = [
+                (
+                    "clusterId",
+                    "",
+                ),
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_create_cluster_rest_unset_required_fields():
+    transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.create_cluster._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("clusterId", "requestId", )) & set(("parent", "clusterId", "cluster", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_cluster_rest_interceptors(null_interceptor):
+    transport = transports.ManagedKafkaRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+        )
+    client = ManagedKafkaClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_create_cluster") as post, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_create_cluster") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = managed_kafka.CreateClusterRequest.pb(managed_kafka.CreateClusterRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = managed_kafka.CreateClusterRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.create_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_create_cluster_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.CreateClusterRequest):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/locations/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_cluster(request) + + +def test_create_cluster_rest_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))), + cluster_id='cluster_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{parent=projects/*/locations/*}/clusters" % client.transport._host, args[1]) + + +def test_create_cluster_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + managed_kafka.CreateClusterRequest(), + parent='parent_value', + cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))), + cluster_id='cluster_id_value', + ) + + +def test_create_cluster_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.UpdateClusterRequest, + dict, +]) +def test_update_cluster_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'cluster': {'name': 'projects/sample1/locations/sample2/clusters/sample3'}} + request_init["cluster"] = {'gcp_config': {'access_config': {'network_configs': [{'subnet': 'subnet_value'}]}, 'kms_key': 'kms_key_value'}, 'name': 'projects/sample1/locations/sample2/clusters/sample3', 'create_time': {'seconds': 751, 'nanos': 543}, 'update_time': {}, 'labels': {}, 'capacity_config': {'vcpu_count': 1094, 'memory_bytes': 1311}, 'rebalance_config': {'mode': 1}, 'state': 1} + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = managed_kafka.UpdateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_cluster(request) + + # Establish that the response is the type that we expect. 
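+    # update_cluster is a long-running method: the client wraps the
+    # operations_pb2.Operation from the mocked body in an api_core Operation
+    # future, and response.operation exposes the raw proto asserted on below.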
+ assert response.operation.name == "operations/spam" + +def test_update_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc + + request = {} + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_cluster_rest_required_fields(request_type=managed_kafka.UpdateClusterRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("request_id", "update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "patch",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.update_cluster(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_update_cluster_rest_unset_required_fields():
+    transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.update_cluster._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("requestId", "updateMask", )) & set(("updateMask", "cluster", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_cluster_rest_interceptors(null_interceptor):
+    transport = transports.ManagedKafkaRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+    )
+    client = ManagedKafkaClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_update_cluster") as post, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_update_cluster") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = managed_kafka.UpdateClusterRequest.pb(managed_kafka.UpdateClusterRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = managed_kafka.UpdateClusterRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.update_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_update_cluster_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.UpdateClusterRequest):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'cluster': {'name': 'projects/sample1/locations/sample2/clusters/sample3'}}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_cluster(request) + + +def test_update_cluster_rest_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'cluster': {'name': 'projects/sample1/locations/sample2/clusters/sample3'}} + + # get truthy value for each flattened field + mock_args = dict( + cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{cluster.name=projects/*/locations/*/clusters/*}" % client.transport._host, args[1]) + + +def test_update_cluster_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_cluster( + managed_kafka.UpdateClusterRequest(), + cluster=resources.Cluster(gcp_config=resources.GcpConfig(access_config=resources.AccessConfig(network_configs=[resources.NetworkConfig(subnet='subnet_value')]))), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_update_cluster_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.DeleteClusterRequest, + dict, +]) +def test_delete_cluster_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
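+        # delete_cluster is also long-running, so the mocked HTTP body is a
+        # serialized operations_pb2.Operation rather than a resource message.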
+ return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_cluster(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + +def test_delete_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc + + request = {} + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_cluster_rest_required_fields(request_type=managed_kafka.DeleteClusterRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("request_id", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. 
+    with mock.patch.object(Session, 'request') as req:
+        # We need to mock transcode() because providing default values
+        # for required fields will fail the real version if the http_options
+        # expect actual values for those fields.
+        with mock.patch.object(path_template, 'transcode') as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "delete",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.delete_cluster(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_delete_cluster_rest_unset_required_fields():
+    transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.delete_cluster._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("requestId", )) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_cluster_rest_interceptors(null_interceptor):
+    transport = transports.ManagedKafkaRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+    )
+    client = ManagedKafkaClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_delete_cluster") as post, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_delete_cluster") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = managed_kafka.DeleteClusterRequest.pb(managed_kafka.DeleteClusterRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = managed_kafka.DeleteClusterRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.delete_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_delete_cluster_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.DeleteClusterRequest):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
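+    # No error payload is supplied; the 400 status alone should be enough
+    # for the shared error-mapping code to raise BadRequest.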
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_cluster(request) + + +def test_delete_cluster_rest_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/clusters/*}" % client.transport._host, args[1]) + + +def test_delete_cluster_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + managed_kafka.DeleteClusterRequest(), + name='name_value', + ) + + +def test_delete_cluster_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.ListTopicsRequest, + dict, +]) +def test_list_topics_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = managed_kafka.ListTopicsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = managed_kafka.ListTopicsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_topics(request) + + # Establish that the response is the type that we expect. 
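+    # list_topics wraps the response in a ListTopicsPager; attribute access
+    # such as next_page_token is proxied through to the underlying
+    # ListTopicsResponse.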
+ assert isinstance(response, pagers.ListTopicsPager) + assert response.next_page_token == 'next_page_token_value' + +def test_list_topics_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_topics in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.list_topics] = mock_rpc + + request = {} + client.list_topics(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_topics(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_topics_rest_required_fields(request_type=managed_kafka.ListTopicsRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_topics._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_topics._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = managed_kafka.ListTopicsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = managed_kafka.ListTopicsResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.list_topics(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_list_topics_rest_unset_required_fields():
+    transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.list_topics._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("pageSize", "pageToken", )) & set(("parent", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_topics_rest_interceptors(null_interceptor):
+    transport = transports.ManagedKafkaRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+    )
+    client = ManagedKafkaClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_list_topics") as post, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_list_topics") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = managed_kafka.ListTopicsRequest.pb(managed_kafka.ListTopicsRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = managed_kafka.ListTopicsResponse.to_json(managed_kafka.ListTopicsResponse())
+
+        request = managed_kafka.ListTopicsRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = managed_kafka.ListTopicsResponse()
+
+        client.list_topics(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_list_topics_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.ListTopicsRequest):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_topics(request) + + +def test_list_topics_rest_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = managed_kafka.ListTopicsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = managed_kafka.ListTopicsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_topics(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{parent=projects/*/locations/*/clusters/*}/topics" % client.transport._host, args[1]) + + +def test_list_topics_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_topics( + managed_kafka.ListTopicsRequest(), + parent='parent_value', + ) + + +def test_list_topics_rest_pager(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
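+        # The pager is exercised purely via req.side_effect: each mocked HTTP
+        # response below is one page, and iteration stops once a page carries
+        # an empty next_page_token. Roughly equivalent manual paging (handle()
+        # is hypothetical) would look like:
+        #
+        #     for page in client.list_topics(request=sample_request).pages:
+        #         handle(page.topics)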
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + resources.Topic(), + ], + next_page_token='abc', + ), + managed_kafka.ListTopicsResponse( + topics=[], + next_page_token='def', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + ], + next_page_token='ghi', + ), + managed_kafka.ListTopicsResponse( + topics=[ + resources.Topic(), + resources.Topic(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(managed_kafka.ListTopicsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'} + + pager = client.list_topics(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resources.Topic) + for i in results) + + pages = list(client.list_topics(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.GetTopicRequest, + dict, +]) +def test_get_topic_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.Topic( + name='name_value', + partition_count=1634, + replication_factor=1912, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.Topic.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_topic(request) + + # Establish that the response is the type that we expect. 
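+    # Unlike the cluster mutations above, get_topic is a plain unary call
+    # that returns the resources.Topic directly rather than an Operation.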
+ assert isinstance(response, resources.Topic) + assert response.name == 'name_value' + assert response.partition_count == 1634 + assert response.replication_factor == 1912 + +def test_get_topic_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_topic in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.get_topic] = mock_rpc + + request = {} + client.get_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_topic(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_topic_rest_required_fields(request_type=managed_kafka.GetTopicRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_topic._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_topic._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resources.Topic() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = resources.Topic.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.get_topic(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_get_topic_rest_unset_required_fields():
+    transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.get_topic._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_topic_rest_interceptors(null_interceptor):
+    transport = transports.ManagedKafkaRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+    )
+    client = ManagedKafkaClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_get_topic") as post, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_get_topic") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = managed_kafka.GetTopicRequest.pb(managed_kafka.GetTopicRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = resources.Topic.to_json(resources.Topic())
+
+        request = managed_kafka.GetTopicRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = resources.Topic()
+
+        client.get_topic(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_get_topic_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.GetTopicRequest):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get_topic(request)
+
+
+def test_get_topic_rest_flattened():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
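+    # The flattened variant builds the request from keyword arguments; the
+    # assertions below verify they were transcoded onto the expected URI.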
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.Topic() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.Topic.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_topic(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/clusters/*/topics/*}" % client.transport._host, args[1]) + + +def test_get_topic_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_topic( + managed_kafka.GetTopicRequest(), + name='name_value', + ) + + +def test_get_topic_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.CreateTopicRequest, + dict, +]) +def test_create_topic_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'} + request_init["topic"] = {'name': 'name_value', 'partition_count': 1634, 'replication_factor': 1912, 'configs': {}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = managed_kafka.CreateTopicRequest.meta.fields["topic"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["topic"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["topic"][field])): + del request_init["topic"][field][i][subfield] + else: + del request_init["topic"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.Topic( + name='name_value', + partition_count=1634, + replication_factor=1912, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.Topic.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_topic(request) + + # Establish that the response is the type that we expect. 
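+    # create_topic returns the created resources.Topic synchronously; there
+    # is no long-running operation to resolve.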
+ assert isinstance(response, resources.Topic) + assert response.name == 'name_value' + assert response.partition_count == 1634 + assert response.replication_factor == 1912 + +def test_create_topic_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_topic in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.create_topic] = mock_rpc + + request = {} + client.create_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_topic(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_topic_rest_required_fields(request_type=managed_kafka.CreateTopicRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["topic_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + assert "topicId" not in jsonified_request + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_topic._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "topicId" in jsonified_request + assert jsonified_request["topicId"] == request_init["topic_id"] + + jsonified_request["parent"] = 'parent_value' + jsonified_request["topicId"] = 'topic_id_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_topic._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("topic_id", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "topicId" in jsonified_request + assert jsonified_request["topicId"] == 'topic_id_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resources.Topic() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+        with mock.patch.object(path_template, 'transcode') as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = resources.Topic.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.create_topic(request)
+
+            expected_params = [
+                (
+                    "topicId",
+                    "",
+                ),
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_create_topic_rest_unset_required_fields():
+    transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.create_topic._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("topicId", )) & set(("parent", "topicId", "topic", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_topic_rest_interceptors(null_interceptor):
+    transport = transports.ManagedKafkaRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+    )
+    client = ManagedKafkaClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_create_topic") as post, \
+         mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_create_topic") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = managed_kafka.CreateTopicRequest.pb(managed_kafka.CreateTopicRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = resources.Topic.to_json(resources.Topic())
+
+        request = managed_kafka.CreateTopicRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = resources.Topic()
+
+        client.create_topic(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_create_topic_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.CreateTopicRequest):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_topic(request) + + +def test_create_topic_rest_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.Topic() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + topic=resources.Topic(name='name_value'), + topic_id='topic_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.Topic.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_topic(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{parent=projects/*/locations/*/clusters/*}/topics" % client.transport._host, args[1]) + + +def test_create_topic_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_topic( + managed_kafka.CreateTopicRequest(), + parent='parent_value', + topic=resources.Topic(name='name_value'), + topic_id='topic_id_value', + ) + + +def test_create_topic_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.UpdateTopicRequest, + dict, +]) +def test_update_topic_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'topic': {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4'}} + request_init["topic"] = {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4', 'partition_count': 1634, 'replication_factor': 1912, 'configs': {}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = managed_kafka.UpdateTopicRequest.meta.fields["topic"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. 
+ # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["topic"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["topic"][field])): + del request_init["topic"][field][i][subfield] + else: + del request_init["topic"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.Topic( + name='name_value', + partition_count=1634, + replication_factor=1912, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.Topic.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_topic(request) + + # Establish that the response is the type that we expect. 
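+ # The mocked body above is plain JSON produced by MessageToJson; the REST
+ # transport parses it back into a resources.Topic, so the scalar fields
+ # set on return_value are expected to round-trip intact.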
+ assert isinstance(response, resources.Topic) + assert response.name == 'name_value' + assert response.partition_count == 1634 + assert response.replication_factor == 1912 + +def test_update_topic_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_topic in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.update_topic] = mock_rpc + + request = {} + client.update_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_topic(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_topic_rest_required_fields(request_type=managed_kafka.UpdateTopicRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_topic._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_topic._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resources.Topic() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
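+ # The '$alt' parameter asserted further down is added by the REST
+ # transport itself: it requests JSON responses with enums encoded as
+ # integers, independent of any request fields.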
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "patch",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = resources.Topic.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.update_topic(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_update_topic_rest_unset_required_fields():
+ transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.update_topic._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("updateMask", )) & set(("updateMask", "topic", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_topic_rest_interceptors(null_interceptor):
+ transport = transports.ManagedKafkaRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+ )
+ client = ManagedKafkaClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_update_topic") as post, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_update_topic") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = managed_kafka.UpdateTopicRequest.pb(managed_kafka.UpdateTopicRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = resources.Topic.to_json(resources.Topic())
+
+ request = managed_kafka.UpdateTopicRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = resources.Topic()
+
+ client.update_topic(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_update_topic_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.UpdateTopicRequest):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'topic': {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4'}}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
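+ # The fake 400 Response below also carries a requests.Request(), since
+ # downstream error handling may inspect response.request when building
+ # the raised exception (an assumption about the accessors involved).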
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_topic(request) + + +def test_update_topic_rest_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.Topic() + + # get arguments that satisfy an http rule for this method + sample_request = {'topic': {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4'}} + + # get truthy value for each flattened field + mock_args = dict( + topic=resources.Topic(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.Topic.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_topic(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{topic.name=projects/*/locations/*/clusters/*/topics/*}" % client.transport._host, args[1]) + + +def test_update_topic_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_topic( + managed_kafka.UpdateTopicRequest(), + topic=resources.Topic(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_update_topic_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.DeleteTopicRequest, + dict, +]) +def test_delete_topic_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_topic(request) + + # Establish that the response is the type that we expect. 
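+ # DeleteTopic maps to google.protobuf.Empty on the wire, which is why the
+ # mocked body is an empty string and the client returns None.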
+ assert response is None + +def test_delete_topic_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_topic in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.delete_topic] = mock_rpc + + request = {} + client.delete_topic(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_topic(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_topic_rest_required_fields(request_type=managed_kafka.DeleteTopicRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_topic._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_topic._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
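+ # Unlike the create/update cases, no 'body' key is placed in the
+ # transcode result below: a DELETE carries everything in the URI and
+ # query string.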
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "delete",
+ 'query_params': pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ''
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.delete_topic(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_delete_topic_rest_unset_required_fields():
+ transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.delete_topic._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_topic_rest_interceptors(null_interceptor):
+ transport = transports.ManagedKafkaRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+ )
+ client = ManagedKafkaClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_delete_topic") as pre:
+ pre.assert_not_called()
+ pb_message = managed_kafka.DeleteTopicRequest.pb(managed_kafka.DeleteTopicRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+
+ request = managed_kafka.DeleteTopicRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.delete_topic(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+
+
+def test_delete_topic_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.DeleteTopicRequest):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ client.delete_topic(request)
+
+
+def test_delete_topic_rest_flattened():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req:
+ # Designate an appropriate value for the returned response.
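+ # Flattened-argument tests invoke the method with keyword arguments
+ # instead of a request object; the path_template.validate check at the
+ # end confirms the name was routed into the URL template.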
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/clusters/sample3/topics/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_topic(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/clusters/*/topics/*}" % client.transport._host, args[1]) + + +def test_delete_topic_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_topic( + managed_kafka.DeleteTopicRequest(), + name='name_value', + ) + + +def test_delete_topic_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.ListConsumerGroupsRequest, + dict, +]) +def test_list_consumer_groups_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = managed_kafka.ListConsumerGroupsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = managed_kafka.ListConsumerGroupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_consumer_groups(request) + + # Establish that the response is the type that we expect. 
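+ # List methods return a Pager wrapper rather than the raw response;
+ # attribute access such as next_page_token is delegated to the wrapped
+ # response object (a detail of the generated pagers assumed here).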
+ assert isinstance(response, pagers.ListConsumerGroupsPager) + assert response.next_page_token == 'next_page_token_value' + +def test_list_consumer_groups_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_consumer_groups in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.list_consumer_groups] = mock_rpc + + request = {} + client.list_consumer_groups(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_consumer_groups(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_consumer_groups_rest_required_fields(request_type=managed_kafka.ListConsumerGroupsRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_consumer_groups._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_consumer_groups._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = managed_kafka.ListConsumerGroupsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
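+ # page_size and page_token are optional query parameters, which is why
+ # the set-difference assertion above tolerates them among the unset
+ # fields.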
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "get",
+ 'query_params': pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = managed_kafka.ListConsumerGroupsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.list_consumer_groups(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_list_consumer_groups_rest_unset_required_fields():
+ transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.list_consumer_groups._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("pageSize", "pageToken", )) & set(("parent", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_consumer_groups_rest_interceptors(null_interceptor):
+ transport = transports.ManagedKafkaRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+ )
+ client = ManagedKafkaClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_list_consumer_groups") as post, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_list_consumer_groups") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = managed_kafka.ListConsumerGroupsRequest.pb(managed_kafka.ListConsumerGroupsRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = managed_kafka.ListConsumerGroupsResponse.to_json(managed_kafka.ListConsumerGroupsResponse())
+
+ request = managed_kafka.ListConsumerGroupsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = managed_kafka.ListConsumerGroupsResponse()
+
+ client.list_consumer_groups(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_consumer_groups_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.ListConsumerGroupsRequest):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_consumer_groups(request) + + +def test_list_consumer_groups_rest_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = managed_kafka.ListConsumerGroupsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = managed_kafka.ListConsumerGroupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_consumer_groups(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{parent=projects/*/locations/*/clusters/*}/consumerGroups" % client.transport._host, args[1]) + + +def test_list_consumer_groups_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_consumer_groups( + managed_kafka.ListConsumerGroupsRequest(), + parent='parent_value', + ) + + +def test_list_consumer_groups_rest_pager(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + resources.ConsumerGroup(), + resources.ConsumerGroup(), + ], + next_page_token='abc', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[], + next_page_token='def', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + ], + next_page_token='ghi', + ), + managed_kafka.ListConsumerGroupsResponse( + consumer_groups=[ + resources.ConsumerGroup(), + resources.ConsumerGroup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(managed_kafka.ListConsumerGroupsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/locations/sample2/clusters/sample3'} + + pager = client.list_consumer_groups(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, resources.ConsumerGroup) + for i in results) + + pages = list(client.list_consumer_groups(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.GetConsumerGroupRequest, + dict, +]) +def test_get_consumer_group_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.ConsumerGroup( + name='name_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.ConsumerGroup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_consumer_group(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resources.ConsumerGroup) + assert response.name == 'name_value' + +def test_get_consumer_group_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_consumer_group in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.get_consumer_group] = mock_rpc + + request = {} + client.get_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_consumer_group(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_consumer_group_rest_required_fields(request_type=managed_kafka.GetConsumerGroupRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_consumer_group._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_consumer_group._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resources.ConsumerGroup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
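+ # As with DELETE, a GET transcodes without a body; only the URI and
+ # query parameters carry the already-validated name field.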
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "get",
+ 'query_params': pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = resources.ConsumerGroup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.get_consumer_group(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_get_consumer_group_rest_unset_required_fields():
+ transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.get_consumer_group._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_consumer_group_rest_interceptors(null_interceptor):
+ transport = transports.ManagedKafkaRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+ )
+ client = ManagedKafkaClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_get_consumer_group") as post, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_get_consumer_group") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = managed_kafka.GetConsumerGroupRequest.pb(managed_kafka.GetConsumerGroupRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = resources.ConsumerGroup.to_json(resources.ConsumerGroup())
+
+ request = managed_kafka.GetConsumerGroupRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = resources.ConsumerGroup()
+
+ client.get_consumer_group(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_consumer_group_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.GetConsumerGroupRequest):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_consumer_group(request) + + +def test_get_consumer_group_rest_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.ConsumerGroup() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.ConsumerGroup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_consumer_group(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/clusters/*/consumerGroups/*}" % client.transport._host, args[1]) + + +def test_get_consumer_group_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_consumer_group( + managed_kafka.GetConsumerGroupRequest(), + name='name_value', + ) + + +def test_get_consumer_group_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.UpdateConsumerGroupRequest, + dict, +]) +def test_update_consumer_group_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'consumer_group': {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4'}} + request_init["consumer_group"] = {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4', 'topics': {}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = managed_kafka.UpdateConsumerGroupRequest.meta.fields["consumer_group"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
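+ # proto-plus message classes are detected below by the absence of a
+ # DESCRIPTOR attribute; raw protobuf classes expose their fields through
+ # DESCRIPTOR.fields instead.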
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["consumer_group"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["consumer_group"][field])): + del request_init["consumer_group"][field][i][subfield] + else: + del request_init["consumer_group"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.ConsumerGroup( + name='name_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.ConsumerGroup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_consumer_group(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, resources.ConsumerGroup) + assert response.name == 'name_value' + +def test_update_consumer_group_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_consumer_group in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.update_consumer_group] = mock_rpc + + request = {} + client.update_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_consumer_group(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_consumer_group_rest_required_fields(request_type=managed_kafka.UpdateConsumerGroupRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_consumer_group._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_consumer_group._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = resources.ConsumerGroup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "patch",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = resources.ConsumerGroup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.update_consumer_group(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_update_consumer_group_rest_unset_required_fields():
+ transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.update_consumer_group._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("updateMask", )) & set(("updateMask", "consumerGroup", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_consumer_group_rest_interceptors(null_interceptor):
+ transport = transports.ManagedKafkaRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+ )
+ client = ManagedKafkaClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "post_update_consumer_group") as post, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_update_consumer_group") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = managed_kafka.UpdateConsumerGroupRequest.pb(managed_kafka.UpdateConsumerGroupRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = resources.ConsumerGroup.to_json(resources.ConsumerGroup())
+
+ request = managed_kafka.UpdateConsumerGroupRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = resources.ConsumerGroup()
+
+ client.update_consumer_group(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_update_consumer_group_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.UpdateConsumerGroupRequest):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'consumer_group': {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4'}}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_consumer_group(request) + + +def test_update_consumer_group_rest_flattened(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = resources.ConsumerGroup() + + # get arguments that satisfy an http rule for this method + sample_request = {'consumer_group': {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4'}} + + # get truthy value for each flattened field + mock_args = dict( + consumer_group=resources.ConsumerGroup(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = resources.ConsumerGroup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_consumer_group(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{consumer_group.name=projects/*/locations/*/clusters/*/consumerGroups/*}" % client.transport._host, args[1]) + + +def test_update_consumer_group_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_consumer_group( + managed_kafka.UpdateConsumerGroupRequest(), + consumer_group=resources.ConsumerGroup(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_update_consumer_group_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + managed_kafka.DeleteConsumerGroupRequest, + dict, +]) +def test_delete_consumer_group_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_consumer_group(request) + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_consumer_group_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_consumer_group in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = "foo" # operation_request.operation in compute client(s) expect a string. + client._transport._wrapped_methods[client._transport.delete_consumer_group] = mock_rpc + + request = {} + client.delete_consumer_group(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_consumer_group(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_consumer_group_rest_required_fields(request_type=managed_kafka.DeleteConsumerGroupRequest): + transport_class = transports.ManagedKafkaRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_consumer_group._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_consumer_group._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "delete",
+ 'query_params': pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ''
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.delete_consumer_group(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_delete_consumer_group_rest_unset_required_fields():
+ transport = transports.ManagedKafkaRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.delete_consumer_group._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_consumer_group_rest_interceptors(null_interceptor):
+ transport = transports.ManagedKafkaRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.ManagedKafkaRestInterceptor(),
+ )
+ client = ManagedKafkaClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.ManagedKafkaRestInterceptor, "pre_delete_consumer_group") as pre:
+ pre.assert_not_called()
+ pb_message = managed_kafka.DeleteConsumerGroupRequest.pb(managed_kafka.DeleteConsumerGroupRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+
+ request = managed_kafka.DeleteConsumerGroupRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.delete_consumer_group(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+
+
+def test_delete_consumer_group_rest_bad_request(transport: str = 'rest', request_type=managed_kafka.DeleteConsumerGroupRequest):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ client.delete_consumer_group(request)
+
+
+def test_delete_consumer_group_rest_flattened():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/locations/sample2/clusters/sample3/consumerGroups/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_consumer_group(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v1/{name=projects/*/locations/*/clusters/*/consumerGroups/*}" % client.transport._host, args[1]) + + +def test_delete_consumer_group_rest_flattened_error(transport: str = 'rest'): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_consumer_group( + managed_kafka.DeleteConsumerGroupRequest(), + name='name_value', + ) + + +def test_delete_consumer_group_rest_error(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ManagedKafkaGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ManagedKafkaGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ManagedKafkaClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.ManagedKafkaGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ManagedKafkaClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ManagedKafkaClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ManagedKafkaGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ManagedKafkaClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ManagedKafkaGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ManagedKafkaClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.ManagedKafkaGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ManagedKafkaGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.ManagedKafkaGrpcTransport, + transports.ManagedKafkaGrpcAsyncIOTransport, + transports.ManagedKafkaRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "rest", +]) +def test_transport_kind(transport_name): + transport = ManagedKafkaClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ManagedKafkaGrpcTransport, + ) + +def test_managed_kafka_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ManagedKafkaTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_managed_kafka_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.managedkafka_v1.services.managed_kafka.transports.ManagedKafkaTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ManagedKafkaTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
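+ # The base class only declares the RPC surface; each concrete gRPC/REST
+ # transport is expected to override every method listed here.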
+ methods = ( + 'list_clusters', + 'get_cluster', + 'create_cluster', + 'update_cluster', + 'delete_cluster', + 'list_topics', + 'get_topic', + 'create_topic', + 'update_topic', + 'delete_topic', + 'list_consumer_groups', + 'get_consumer_group', + 'update_consumer_group', + 'delete_consumer_group', + 'get_location', + 'list_locations', + 'get_operation', + 'cancel_operation', + 'delete_operation', + 'list_operations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_managed_kafka_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.managedkafka_v1.services.managed_kafka.transports.ManagedKafkaTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ManagedKafkaTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_managed_kafka_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.managedkafka_v1.services.managed_kafka.transports.ManagedKafkaTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ManagedKafkaTransport() + adc.assert_called_once() + + +def test_managed_kafka_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ManagedKafkaClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ManagedKafkaGrpcTransport, + transports.ManagedKafkaGrpcAsyncIOTransport, + ], +) +def test_managed_kafka_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
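+ # google.auth.default() resolves Application Default Credentials (env var,
+ # gcloud-configured credentials, or the metadata server); it is mocked here
+ # so no real lookup happens.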
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ManagedKafkaGrpcTransport, + transports.ManagedKafkaGrpcAsyncIOTransport, + transports.ManagedKafkaRestTransport, + ], +) +def test_managed_kafka_transport_auth_gdch_credentials(transport_class): + host = 'https://language.com' + api_audience_tests = [None, 'https://language2.com'] + api_audience_expect = [host, 'https://language2.com'] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with( + e + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ManagedKafkaGrpcTransport, grpc_helpers), + (transports.ManagedKafkaGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_managed_kafka_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "managedkafka.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="managedkafka.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.ManagedKafkaGrpcTransport, transports.ManagedKafkaGrpcAsyncIOTransport]) +def test_managed_kafka_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
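+ # client_cert_source_callback yields a (cert_bytes, key_bytes) pair that the
+ # transport should forward to grpc.ssl_channel_credentials().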
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert,
+ private_key=expected_key
+ )
+
+def test_managed_kafka_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel:
+ transports.ManagedKafkaRestTransport(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+def test_managed_kafka_rest_lro_client():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='rest',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AbstractOperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+@pytest.mark.parametrize("transport_name", [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+])
+def test_managed_kafka_host_no_port(transport_name):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='managedkafka.googleapis.com'),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ 'managedkafka.googleapis.com:443'
+ if transport_name in ['grpc', 'grpc_asyncio']
+ else 'https://managedkafka.googleapis.com'
+ )
+
+@pytest.mark.parametrize("transport_name", [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+])
+def test_managed_kafka_host_with_port(transport_name):
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='managedkafka.googleapis.com:8000'),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ 'managedkafka.googleapis.com:8000'
+ if transport_name in ['grpc', 'grpc_asyncio']
+ else 'https://managedkafka.googleapis.com:8000'
+ )
+
+@pytest.mark.parametrize("transport_name", [
+ "rest",
+])
+def test_managed_kafka_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = ManagedKafkaClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = ManagedKafkaClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.list_clusters._session
+ session2 = client2.transport.list_clusters._session
+ assert session1 != session2
+ session1 = client1.transport.get_cluster._session
+ session2 = client2.transport.get_cluster._session
+ assert session1 != session2
+ session1 = client1.transport.create_cluster._session
+ session2 = client2.transport.create_cluster._session
+ assert session1 != session2
+ session1 = client1.transport.update_cluster._session
+ session2 = client2.transport.update_cluster._session
+ assert session1 != session2
+ session1 = client1.transport.delete_cluster._session
+ session2 = client2.transport.delete_cluster._session
+ assert session1 != session2
+ session1 = client1.transport.list_topics._session
+ session2 = client2.transport.list_topics._session
+ assert session1 != session2
+ session1 = client1.transport.get_topic._session
+ session2 = client2.transport.get_topic._session
+ assert session1 != session2
+ session1 = client1.transport.create_topic._session
+ session2 = client2.transport.create_topic._session
+ assert session1 != session2
+ session1 = client1.transport.update_topic._session
+ session2 = client2.transport.update_topic._session
+ assert session1 != session2
+ session1 = client1.transport.delete_topic._session
+ session2 = client2.transport.delete_topic._session
+ assert session1 != session2
+ session1 = client1.transport.list_consumer_groups._session
+ session2 = client2.transport.list_consumer_groups._session
+ assert session1 != session2
+ session1 = client1.transport.get_consumer_group._session
+ session2 = client2.transport.get_consumer_group._session
+ assert session1 != session2
+ session1 = client1.transport.update_consumer_group._session
+ session2 = client2.transport.update_consumer_group._session
+ assert session1 != session2
+ session1 = client1.transport.delete_consumer_group._session
+ session2 = client2.transport.delete_consumer_group._session
+ assert session1 != session2
+
+
+def test_managed_kafka_grpc_transport_channel():
+ channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ManagedKafkaGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+def test_managed_kafka_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.ManagedKafkaGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.ManagedKafkaGrpcTransport, transports.ManagedKafkaGrpcAsyncIOTransport])
+def test_managed_kafka_transport_channel_mtls_with_client_cert_source(
+ transport_class
+):
+ with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, 'default') as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.ManagedKafkaGrpcTransport, transports.ManagedKafkaGrpcAsyncIOTransport])
+def test_managed_kafka_transport_channel_mtls_with_adc(
+ transport_class
+):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_managed_kafka_grpc_lro_client():
+ client = ManagedKafkaClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_managed_kafka_grpc_lro_async_client():
+ client = ManagedKafkaAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc_asyncio',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_cluster_path(): + project = "squid" + location = "clam" + cluster = "whelk" + expected = "projects/{project}/locations/{location}/clusters/{cluster}".format(project=project, location=location, cluster=cluster, ) + actual = ManagedKafkaClient.cluster_path(project, location, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "octopus", + "location": "oyster", + "cluster": "nudibranch", + } + path = ManagedKafkaClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = ManagedKafkaClient.parse_cluster_path(path) + assert expected == actual + +def test_consumer_group_path(): + project = "cuttlefish" + location = "mussel" + cluster = "winkle" + consumer_group = "nautilus" + expected = "projects/{project}/locations/{location}/clusters/{cluster}/consumerGroups/{consumer_group}".format(project=project, location=location, cluster=cluster, consumer_group=consumer_group, ) + actual = ManagedKafkaClient.consumer_group_path(project, location, cluster, consumer_group) + assert expected == actual + + +def test_parse_consumer_group_path(): + expected = { + "project": "scallop", + "location": "abalone", + "cluster": "squid", + "consumer_group": "clam", + } + path = ManagedKafkaClient.consumer_group_path(**expected) + + # Check that the path construction is reversible. + actual = ManagedKafkaClient.parse_consumer_group_path(path) + assert expected == actual + +def test_crypto_key_path(): + project = "whelk" + location = "octopus" + key_ring = "oyster" + crypto_key = "nudibranch" + expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, ) + actual = ManagedKafkaClient.crypto_key_path(project, location, key_ring, crypto_key) + assert expected == actual + + +def test_parse_crypto_key_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + "key_ring": "winkle", + "crypto_key": "nautilus", + } + path = ManagedKafkaClient.crypto_key_path(**expected) + + # Check that the path construction is reversible. + actual = ManagedKafkaClient.parse_crypto_key_path(path) + assert expected == actual + +def test_topic_path(): + project = "scallop" + location = "abalone" + cluster = "squid" + topic = "clam" + expected = "projects/{project}/locations/{location}/clusters/{cluster}/topics/{topic}".format(project=project, location=location, cluster=cluster, topic=topic, ) + actual = ManagedKafkaClient.topic_path(project, location, cluster, topic) + assert expected == actual + + +def test_parse_topic_path(): + expected = { + "project": "whelk", + "location": "octopus", + "cluster": "oyster", + "topic": "nudibranch", + } + path = ManagedKafkaClient.topic_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ManagedKafkaClient.parse_topic_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ManagedKafkaClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = ManagedKafkaClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ManagedKafkaClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = ManagedKafkaClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = ManagedKafkaClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ManagedKafkaClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ManagedKafkaClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = ManagedKafkaClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ManagedKafkaClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = ManagedKafkaClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = ManagedKafkaClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ManagedKafkaClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ManagedKafkaClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = ManagedKafkaClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ManagedKafkaClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ManagedKafkaTransport, '_prep_wrapped_messages') as prep: + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ManagedKafkaTransport, '_prep_wrapped_messages') as prep: + transport_class = ManagedKafkaClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request(transport: str = 'rest', request_type=locations_pb2.GetLocationRequest): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({'name': 'projects/sample1/locations/sample2'}, request) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + +@pytest.mark.parametrize("request_type", [ + locations_pb2.GetLocationRequest, + dict, +]) +def test_get_location_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {'name': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + +def test_list_locations_rest_bad_request(transport: str = 'rest', request_type=locations_pb2.ListLocationsRequest): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({'name': 'projects/sample1'}, request) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + +@pytest.mark.parametrize("request_type", [ + locations_pb2.ListLocationsRequest, + dict, +]) +def test_list_locations_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {'name': 'projects/sample1'} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + +def test_cancel_operation_rest_bad_request(transport: str = 'rest', request_type=operations_pb2.CancelOperationRequest): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({'name': 'projects/sample1/locations/sample2/operations/sample3'}, request) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + +@pytest.mark.parametrize("request_type", [ + operations_pb2.CancelOperationRequest, + dict, +]) +def test_cancel_operation_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {'name': 'projects/sample1/locations/sample2/operations/sample3'} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '{}' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + +def test_delete_operation_rest_bad_request(transport: str = 'rest', request_type=operations_pb2.DeleteOperationRequest): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({'name': 'projects/sample1/locations/sample2/operations/sample3'}, request) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + +@pytest.mark.parametrize("request_type", [ + operations_pb2.DeleteOperationRequest, + dict, +]) +def test_delete_operation_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {'name': 'projects/sample1/locations/sample2/operations/sample3'} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '{}' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + +def test_get_operation_rest_bad_request(transport: str = 'rest', request_type=operations_pb2.GetOperationRequest): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({'name': 'projects/sample1/locations/sample2/operations/sample3'}, request) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + +@pytest.mark.parametrize("request_type", [ + operations_pb2.GetOperationRequest, + dict, +]) +def test_get_operation_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {'name': 'projects/sample1/locations/sample2/operations/sample3'} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + +def test_list_operations_rest_bad_request(transport: str = 'rest', request_type=operations_pb2.ListOperationsRequest): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({'name': 'projects/sample1/locations/sample2'}, request) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + +@pytest.mark.parametrize("request_type", [ + operations_pb2.ListOperationsRequest, + dict, +]) +def test_list_operations_rest(request_type): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {'name': 'projects/sample1/locations/sample2'} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_delete_operation(transport: str = "grpc"): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_delete_operation_field_headers(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_delete_operation_from_dict(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
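+ # (In proto3, unset fields serialize as their default values, so an empty
+ # request is still a valid message on the wire.)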
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + +def test_cancel_operation_field_headers(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_cancel_operation_from_dict(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + None + ) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + +def test_get_operation_field_headers(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
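+ # The client should derive the x-goog-request-params routing header
+ # ("name=locations") from the request.name set above.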
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=locations",) in kw["metadata"] + +def test_get_operation_from_dict(): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = ManagedKafkaAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = ManagedKafkaClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = "grpc_asyncio"):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.ListOperationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.ListOperationsResponse)
+
+def test_list_operations_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.ListOperationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        call.return_value = operations_pb2.ListOperationsResponse()
+
+        client.list_operations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.ListOperationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        await client.list_operations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+
+def test_list_operations_from_dict():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
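+        # A bare message works for the sync transport; the async variant
+        # below instead wraps it in ``FakeUnaryUnaryCall`` because the
+        # async client awaits the call object.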
+        call.return_value = operations_pb2.ListOperationsResponse()
+
+        response = client.list_operations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_list_operations_from_dict_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.ListOperationsResponse()
+        )
+        response = await client.list_operations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_list_locations(transport: str = "grpc"):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+        response = client.list_locations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = "grpc_asyncio"):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.ListLocationsRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.ListLocationsResponse)
+
+def test_list_locations_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        client.list_locations(request)
+    # Establish that the underlying gRPC stub method was called.
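+    # ``mock_calls[0]`` is a ``(name, args, kwargs)`` triple: the request is
+    # the first positional argument, and the metadata travels as a keyword.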
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.ListLocationsRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        await client.list_locations(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations",) in kw["metadata"]
+
+def test_list_locations_from_dict():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.ListLocationsResponse()
+
+        response = client.list_locations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_list_locations_from_dict_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.ListLocationsResponse()
+        )
+        response = await client.list_locations(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_get_location(transport: str = "grpc"):
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.GetLocationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+        response = client.get_location(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
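+    # (The locations/operations mixin methods return raw ``*_pb2`` protobuf
+    # messages rather than proto-plus wrapper types.)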
+    assert isinstance(response, locations_pb2.Location)
+@pytest.mark.asyncio
+async def test_get_location_async(transport: str = "grpc_asyncio"):
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = locations_pb2.GetLocationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, locations_pb2.Location)
+
+def test_get_location_field_headers():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = locations_pb2.Location()
+
+        client.get_location(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=locations/abc",) in kw["metadata"]
+
+def test_get_location_from_dict():
+    client = ManagedKafkaClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
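+        # The request below is passed as a plain dict; the client coerces it
+        # into a ``locations_pb2.GetLocationRequest`` before invoking the stub.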
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = ManagedKafkaAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_transport_close():
+    transports = {
+        "rest": "_session",
+        "grpc": "_grpc_channel",
+    }
+
+    for transport, close_name in transports.items():
+        client = ManagedKafkaClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+def test_client_ctx():
+    transports = [
+        'rest',
+        'grpc',
+    ]
+    for transport in transports:
+        client = ManagedKafkaClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
+
+@pytest.mark.parametrize("client_class,transport_class", [
+    (ManagedKafkaClient, transports.ManagedKafkaGrpcTransport),
+    (ManagedKafkaAsyncClient, transports.ManagedKafkaGrpcAsyncIOTransport),
+])
+def test_api_key_credentials(client_class, transport_class):
+    with mock.patch.object(
+        google.auth._default, "get_api_key_credentials", create=True
+    ) as get_api_key_credentials:
+        mock_cred = mock.Mock()
+        get_api_key_credentials.return_value = mock_cred
+        options = client_options.ClientOptions()
+        options.api_key = "api_key"
+        with mock.patch.object(transport_class, "__init__") as patched:
+            patched.return_value = None
+            client = client_class(client_options=options)
+            patched.assert_called_once_with(
+                credentials=mock_cred,
+                credentials_file=None,
+                host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE),
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+                api_audience=None,
+            )
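+
+
+# A minimal sketch of a helper that could factor out the repeated
+# routing-header assertions above (illustrative only; ``assert_routing_header``
+# is not part of the generated surface):
+#
+#     def assert_routing_header(call, expected: str):
+#         _, _, kw = call.mock_calls[0]
+#         assert ("x-goog-request-params", expected) in kw["metadata"]
+#
+# e.g. ``assert_routing_header(call, "name=locations/abc")``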