# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: mediapipe/calculators/tensor/inference_calculator.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
try:
  mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
except AttributeError:
  mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe.framework.calculator_options_pb2
from mediapipe.framework import calculator_options_pb2 as mediapipe_dot_framework_dot_calculator__options__pb2


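# The serialized FileDescriptorProto for inference_calculator.proto is
# registered with the default descriptor pool below; every message, enum, and
# extension descriptor in this module is resolved from this single blob.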
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n7mediapipe/calculators/tensor/inference_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\x1a,mediapipe/framework/calculator_options.proto\"\xce\x11\n\x1aInferenceCalculatorOptions\x12\x12\n\nmodel_path\x18\x01 \x01(\t\x12\x16\n\x0etry_mmap_model\x18\x07 \x01(\x08\x12\x1a\n\x07use_gpu\x18\x02 \x01(\x08:\x05\x66\x61lseB\x02\x18\x01\x12\x1c\n\tuse_nnapi\x18\x03 \x01(\x08:\x05\x66\x61lseB\x02\x18\x01\x12\x1a\n\x0e\x63pu_num_thread\x18\x04 \x01(\x05:\x02-1\x12@\n\x08\x64\x65legate\x18\x05 \x01(\x0b\x32..mediapipe.InferenceCalculatorOptions.Delegate\x12T\n\x13input_output_config\x18\x08 \x01(\x0b\x32\x37.mediapipe.InferenceCalculatorOptions.InputOutputConfig\x1a\xb4\x08\n\x08\x44\x65legate\x12G\n\x06tflite\x18\x01 \x01(\x0b\x32\x35.mediapipe.InferenceCalculatorOptions.Delegate.TfLiteH\x00\x12\x41\n\x03gpu\x18\x02 \x01(\x0b\x32\x32.mediapipe.InferenceCalculatorOptions.Delegate.GpuH\x00\x12\x45\n\x05nnapi\x18\x03 \x01(\x0b\x32\x34.mediapipe.InferenceCalculatorOptions.Delegate.NnapiH\x00\x12I\n\x07xnnpack\x18\x04 \x01(\x0b\x32\x36.mediapipe.InferenceCalculatorOptions.Delegate.XnnpackH\x00\x1a\x08\n\x06TfLite\x1a\x84\x05\n\x03Gpu\x12#\n\x14use_advanced_gpu_api\x18\x01 \x01(\x08:\x05\x66\x61lse\x12H\n\x03\x61pi\x18\x04 \x01(\x0e\x32\x36.mediapipe.InferenceCalculatorOptions.Delegate.Gpu.Api:\x03\x41NY\x12\"\n\x14\x61llow_precision_loss\x18\x03 \x01(\x08:\x04true\x12\x1a\n\x12\x63\x61\x63hed_kernel_path\x18\x02 \x01(\t\x12\x1c\n\x14serialized_model_dir\x18\x07 \x01(\t\x12w\n\x16\x63\x61\x63he_writing_behavior\x18\n \x01(\x0e\x32G.mediapipe.InferenceCalculatorOptions.Delegate.Gpu.CacheWritingBehavior:\x0eWRITE_OR_ERROR\x12\x13\n\x0bmodel_token\x18\x08 \x01(\t\x12\x61\n\x05usage\x18\x05 \x01(\x0e\x32\x41.mediapipe.InferenceCalculatorOptions.Delegate.Gpu.InferenceUsage:\x0fSUSTAINED_SPEED\"&\n\x03\x41pi\x12\x07\n\x03\x41NY\x10\x00\x12\n\n\x06OPENGL\x10\x01\x12\n\n\x06OPENCL\x10\x02\"G\n\x14\x43\x61\x63heWritingBehavior\x12\x0c\n\x08NO_WRITE\x10\x00\x12\r\n\tTRY_WRITE\x10\x01\x12\x12\n\x0eWRITE_OR_ERROR\x10\x02\"N\n\x0eInferenceUsage\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x16\n\x12\x46\x41ST_SINGLE_ANSWER\x10\x01\x12\x13\n\x0fSUSTAINED_SPEED\x10\x02\x1aI\n\x05Nnapi\x12\x11\n\tcache_dir\x18\x01 \x01(\t\x12\x13\n\x0bmodel_token\x18\x02 \x01(\t\x12\x18\n\x10\x61\x63\x63\x65lerator_name\x18\x03 \x01(\t\x1a\"\n\x07Xnnpack\x12\x17\n\x0bnum_threads\x18\x01 \x01(\x05:\x02-1B\n\n\x08\x64\x65legate\x1a\x88\x06\n\x11InputOutputConfig\x12l\n\x18input_tensor_indices_map\x18\x01 \x01(\x0b\x32H.mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMapH\x00\x12h\n\x16input_tensor_names_map\x18\x03 \x01(\x0b\x32\x46.mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMapH\x00\x12m\n\x19output_tensor_indices_map\x18\x02 \x01(\x0b\x32H.mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMapH\x01\x12i\n\x17output_tensor_names_map\x18\x04 \x01(\x0b\x32\x46.mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMapH\x01\x12i\n\x15\x66\x65\x65\x64\x62\x61\x63k_tensor_links\x18\x05 \x03(\x0b\x32J.mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink\x1a\x34\n\x10TensorIndicesMap\x12 \n\x14model_tensor_indices\x18\x01 \x03(\x05\x42\x02\x10\x01\x1a&\n\x0eTensorNamesMap\x12\x14\n\x0ctensor_names\x18\x01 \x03(\t\x1aS\n\x12\x46\x65\x65\x64\x62\x61\x63kTensorLink\x12\x1f\n\x17\x66rom_output_tensor_name\x18\x01 \x01(\t\x12\x1c\n\x14to_input_tensor_name\x18\x02 \x01(\tB\x10\n\x0eInputTensorMapB\x11\n\x0fOutputTensorMap2T\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\xf7\xd3\xcb\xa0\x01 \x01(\x0b\x32%.mediapipe.InferenceCalculatorOptionsBA\n%com.google.mediapipe.calculator.protoB\x18InferenceCalculatorProto')



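# Module-level handles to the message and enum descriptors, looked up by name
# from the file descriptor registered above.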
_INFERENCECALCULATOROPTIONS = DESCRIPTOR.message_types_by_name['InferenceCalculatorOptions']
_INFERENCECALCULATOROPTIONS_DELEGATE = _INFERENCECALCULATOROPTIONS.nested_types_by_name['Delegate']
_INFERENCECALCULATOROPTIONS_DELEGATE_TFLITE = _INFERENCECALCULATOROPTIONS_DELEGATE.nested_types_by_name['TfLite']
_INFERENCECALCULATOROPTIONS_DELEGATE_GPU = _INFERENCECALCULATOROPTIONS_DELEGATE.nested_types_by_name['Gpu']
_INFERENCECALCULATOROPTIONS_DELEGATE_NNAPI = _INFERENCECALCULATOROPTIONS_DELEGATE.nested_types_by_name['Nnapi']
_INFERENCECALCULATOROPTIONS_DELEGATE_XNNPACK = _INFERENCECALCULATOROPTIONS_DELEGATE.nested_types_by_name['Xnnpack']
_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG = _INFERENCECALCULATOROPTIONS.nested_types_by_name['InputOutputConfig']
_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP = _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG.nested_types_by_name['TensorIndicesMap']
_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORNAMESMAP = _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG.nested_types_by_name['TensorNamesMap']
_INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_FEEDBACKTENSORLINK = _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG.nested_types_by_name['FeedbackTensorLink']
_INFERENCECALCULATOROPTIONS_DELEGATE_GPU_API = _INFERENCECALCULATOROPTIONS_DELEGATE_GPU.enum_types_by_name['Api']
_INFERENCECALCULATOROPTIONS_DELEGATE_GPU_CACHEWRITINGBEHAVIOR = _INFERENCECALCULATOROPTIONS_DELEGATE_GPU.enum_types_by_name['CacheWritingBehavior']
_INFERENCECALCULATOROPTIONS_DELEGATE_GPU_INFERENCEUSAGE = _INFERENCECALCULATOROPTIONS_DELEGATE_GPU.enum_types_by_name['InferenceUsage']
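# Synthesize the concrete message classes at import time via the reflection
# metaclass. Nested messages (Delegate, InputOutputConfig, and their children)
# are created inline and become attributes of their enclosing class.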
InferenceCalculatorOptions = _reflection.GeneratedProtocolMessageType('InferenceCalculatorOptions', (_message.Message,), {

  'Delegate' : _reflection.GeneratedProtocolMessageType('Delegate', (_message.Message,), {

    'TfLite' : _reflection.GeneratedProtocolMessageType('TfLite', (_message.Message,), {
      'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS_DELEGATE_TFLITE,
      '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
      # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate.TfLite)
      })
    ,

    'Gpu' : _reflection.GeneratedProtocolMessageType('Gpu', (_message.Message,), {
      'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS_DELEGATE_GPU,
      '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
      # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate.Gpu)
      })
    ,

    'Nnapi' : _reflection.GeneratedProtocolMessageType('Nnapi', (_message.Message,), {
      'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS_DELEGATE_NNAPI,
      '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
      # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate.Nnapi)
      })
    ,

    'Xnnpack' : _reflection.GeneratedProtocolMessageType('Xnnpack', (_message.Message,), {
      'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS_DELEGATE_XNNPACK,
      '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
      # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack)
      })
    ,
    'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS_DELEGATE,
    '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
    # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.Delegate)
    })
  ,

  'InputOutputConfig' : _reflection.GeneratedProtocolMessageType('InputOutputConfig', (_message.Message,), {

    'TensorIndicesMap' : _reflection.GeneratedProtocolMessageType('TensorIndicesMap', (_message.Message,), {
      'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP,
      '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
      # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
      })
    ,

    'TensorNamesMap' : _reflection.GeneratedProtocolMessageType('TensorNamesMap', (_message.Message,), {
      'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORNAMESMAP,
      '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
      # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
      })
    ,

    'FeedbackTensorLink' : _reflection.GeneratedProtocolMessageType('FeedbackTensorLink', (_message.Message,), {
      'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_FEEDBACKTENSORLINK,
      '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
      # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)
      })
    ,
    'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG,
    '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
    # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions.InputOutputConfig)
    })
  ,
  'DESCRIPTOR' : _INFERENCECALCULATOROPTIONS,
  '__module__' : 'mediapipe.calculators.tensor.inference_calculator_pb2'
  # @@protoc_insertion_point(class_scope:mediapipe.InferenceCalculatorOptions)
  })
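# Register each generated class with the default symbol database so it can be
# resolved by its full name (e.g. 'mediapipe.InferenceCalculatorOptions').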
_sym_db.RegisterMessage(InferenceCalculatorOptions)
_sym_db.RegisterMessage(InferenceCalculatorOptions.Delegate)
_sym_db.RegisterMessage(InferenceCalculatorOptions.Delegate.TfLite)
_sym_db.RegisterMessage(InferenceCalculatorOptions.Delegate.Gpu)
_sym_db.RegisterMessage(InferenceCalculatorOptions.Delegate.Nnapi)
_sym_db.RegisterMessage(InferenceCalculatorOptions.Delegate.Xnnpack)
_sym_db.RegisterMessage(InferenceCalculatorOptions.InputOutputConfig)
_sym_db.RegisterMessage(InferenceCalculatorOptions.InputOutputConfig.TensorIndicesMap)
_sym_db.RegisterMessage(InferenceCalculatorOptions.InputOutputConfig.TensorNamesMap)
_sym_db.RegisterMessage(InferenceCalculatorOptions.InputOutputConfig.FeedbackTensorLink)

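# With the pure-Python descriptor implementation, custom field options and the
# byte offsets of each descriptor within the serialized file must be patched in
# manually; the C descriptor implementation derives them from the serialized
# file directly.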
if _descriptor._USE_C_DESCRIPTORS == False:
  mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_INFERENCECALCULATOROPTIONS.extensions_by_name['ext'])

  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\n%com.google.mediapipe.calculator.protoB\030InferenceCalculatorProto'
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP.fields_by_name['model_tensor_indices']._options = None
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP.fields_by_name['model_tensor_indices']._serialized_options = b'\020\001'
  _INFERENCECALCULATOROPTIONS.fields_by_name['use_gpu']._options = None
  _INFERENCECALCULATOROPTIONS.fields_by_name['use_gpu']._serialized_options = b'\030\001'
  _INFERENCECALCULATOROPTIONS.fields_by_name['use_nnapi']._options = None
  _INFERENCECALCULATOROPTIONS.fields_by_name['use_nnapi']._serialized_options = b'\030\001'
  _INFERENCECALCULATOROPTIONS._serialized_start=155
  _INFERENCECALCULATOROPTIONS._serialized_end=2409
  _INFERENCECALCULATOROPTIONS_DELEGATE._serialized_start=468
  _INFERENCECALCULATOROPTIONS_DELEGATE._serialized_end=1544
  _INFERENCECALCULATOROPTIONS_DELEGATE_TFLITE._serialized_start=766
  _INFERENCECALCULATOROPTIONS_DELEGATE_TFLITE._serialized_end=774
  _INFERENCECALCULATOROPTIONS_DELEGATE_GPU._serialized_start=777
  _INFERENCECALCULATOROPTIONS_DELEGATE_GPU._serialized_end=1421
  _INFERENCECALCULATOROPTIONS_DELEGATE_GPU_API._serialized_start=1230
  _INFERENCECALCULATOROPTIONS_DELEGATE_GPU_API._serialized_end=1268
  _INFERENCECALCULATOROPTIONS_DELEGATE_GPU_CACHEWRITINGBEHAVIOR._serialized_start=1270
  _INFERENCECALCULATOROPTIONS_DELEGATE_GPU_CACHEWRITINGBEHAVIOR._serialized_end=1341
  _INFERENCECALCULATOROPTIONS_DELEGATE_GPU_INFERENCEUSAGE._serialized_start=1343
  _INFERENCECALCULATOROPTIONS_DELEGATE_GPU_INFERENCEUSAGE._serialized_end=1421
  _INFERENCECALCULATOROPTIONS_DELEGATE_NNAPI._serialized_start=1423
  _INFERENCECALCULATOROPTIONS_DELEGATE_NNAPI._serialized_end=1496
  _INFERENCECALCULATOROPTIONS_DELEGATE_XNNPACK._serialized_start=1498
  _INFERENCECALCULATOROPTIONS_DELEGATE_XNNPACK._serialized_end=1532
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG._serialized_start=1547
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG._serialized_end=2323
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP._serialized_start=2109
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORINDICESMAP._serialized_end=2161
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORNAMESMAP._serialized_start=2163
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_TENSORNAMESMAP._serialized_end=2201
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_FEEDBACKTENSORLINK._serialized_start=2203
  _INFERENCECALCULATOROPTIONS_INPUTOUTPUTCONFIG_FEEDBACKTENSORLINK._serialized_end=2286
# @@protoc_insertion_point(module_scope)
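
# --- Usage sketch (illustrative, not emitted by protoc) -----------------------
# A minimal, hedged example of attaching these options to a graph node. The
# calculator name and model path are assumptions for illustration; only the
# option fields themselves come from inference_calculator.proto.
if __name__ == '__main__':
  from mediapipe.framework import calculator_pb2

  node = calculator_pb2.CalculatorGraphConfig.Node()
  node.calculator = 'InferenceCalculator'  # assumed calculator name
  # Extensions declared inside a message are exposed as class attributes, so
  # the CalculatorOptions extension is reachable as InferenceCalculatorOptions.ext.
  options = node.options.Extensions[InferenceCalculatorOptions.ext]
  options.model_path = 'path/to/model.tflite'  # hypothetical path
  options.delegate.xnnpack.num_threads = 4     # selects the XNNPACK delegate
  print(node)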
