
# Autogenerated by mlir-tblgen; don't manually edit.

from ._ods_common import _cext as _ods_cext
from ._ods_common import (
    equally_sized_accessor as _ods_equally_sized_accessor,
    get_default_loc_context as _ods_get_default_loc_context,
    get_op_results_or_values as _get_op_results_or_values,
    segmented_accessor as _ods_segmented_accessor,
)
_ods_ir = _ods_cext.ir
_ods_cext.globals.register_traceback_file_exclusion(__file__)

import builtins
from typing import Sequence as _Sequence, Union as _Union, Optional as _Optional


@_ods_cext.register_dialect
class _Dialect(_ods_ir.Dialect):
  DIALECT_NAMESPACE = "stablehlo"

@_ods_cext.register_operation(_Dialect)
class AbsOp(_ods_ir.OpView):
  r"""
  Performs element-wise abs operation on `operand` tensor and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#abs
  
  Example:
  ```mlir
  %result = stablehlo.abs %operand : tensor<3xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.abs"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def abs(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return AbsOp(operand=operand, results=results, loc=loc, ip=ip).result
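
# A minimal usage sketch for the element-wise wrappers in this module (`abs`,
# `add`, `and_`, `atan2`, ...). It assumes the upstream MLIR Python bindings
# and that this generated module is importable as `mlir.dialects.stablehlo`;
# the exact import path and the `register_dialect` helper depend on how the
# bindings were built.
#
#   from mlir import ir
#   from mlir.dialects import func, stablehlo
#
#   with ir.Context() as ctx, ir.Location.unknown():
#     stablehlo.register_dialect(ctx)  # assumed helper from the C extension
#     module = ir.Module.create()
#     with ir.InsertionPoint(module.body):
#       i32 = ir.IntegerType.get_signless(32)
#       tensor_ty = ir.RankedTensorType.get([3], i32)
#
#       @func.FuncOp.from_py_func(tensor_ty)
#       def abs_example(operand):
#         # Result types are inferred, so `results` may be left as None.
#         return stablehlo.abs(operand)
#
#     print(module)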

@_ods_cext.register_operation(_Dialect)
class AddOp(_ods_ir.OpView):
  r"""
  Performs element-wise addition of two tensors `lhs` and `rhs` and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#add
  
  Example:
  ```mlir
  %result = stablehlo.add %lhs, %rhs : tensor<2x2xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.add"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def add(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return AddOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class AfterAllOp(_ods_ir.OpView):
  r"""
  Ensures that the operations producing the `inputs` are executed before any
  operations that depend on `result`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#after_all
  
  Example:
  ```mlir
  %result = stablehlo.after_all %input0, %input1 : !stablehlo.token
  ```
  """

  OPERATION_NAME = "stablehlo.after_all"

  _ODS_REGIONS = (0, True)

  def __init__(self, inputs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def result(self) -> _ods_ir.OpResult:
    return self.operation.results[0]

def after_all(inputs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return AfterAllOp(inputs=inputs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class AllGatherOp(_ods_ir.OpView):
  r"""
  Within each process group in the process grid, concatenates the values of the
  `operand` tensor from each process along `all_gather_dim` and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#all_gather
  
  Example:
  ```mlir
  %result:2 = "stablehlo.all_gather"(%operand0, %operand1) {
    all_gather_dim = 1 : i64,
    replica_groups = dense<[[0, 1]]> : tensor<1x2xi64>,
    channel_handle = #stablehlo.channel_handle<handle = 0, type = 0>
  } : (tensor<2x2xi64>, tensor<2x2xi64>) -> (tensor<2x4xi64>, tensor<2x4xi64>)
  ```
  """

  OPERATION_NAME = "stablehlo.all_gather"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operands_, all_gather_dim, replica_groups, *, channel_handle=None, use_global_device_ids=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(operands_))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["all_gather_dim"] = (all_gather_dim if (
    isinstance(all_gather_dim, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(all_gather_dim, context=_ods_context))
    attributes["replica_groups"] = (replica_groups if (
    isinstance(replica_groups, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
      _ods_ir.AttrBuilder.get('I64ElementsAttr')(replica_groups, context=_ods_context))
    if channel_handle is not None: attributes["channel_handle"] = (channel_handle if (
        isinstance(channel_handle, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ChannelHandle')) else
          _ods_ir.AttrBuilder.get('StableHLO_ChannelHandle')(channel_handle, context=_ods_context))
    if bool(use_global_device_ids): attributes["use_global_device_ids"] = _ods_ir.UnitAttr.get(
      _ods_get_default_loc_context(loc))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operands_(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def all_gather_dim(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["all_gather_dim"]

  @all_gather_dim.setter
  def all_gather_dim(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["all_gather_dim"] = value

  @builtins.property
  def replica_groups(self) -> _ods_ir.DenseIntElementsAttr:
    return self.operation.attributes["replica_groups"]

  @replica_groups.setter
  def replica_groups(self, value: _ods_ir.DenseIntElementsAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["replica_groups"] = value

  @builtins.property
  def channel_handle(self) -> _Optional[_ods_ir.Attribute]:
    if "channel_handle" not in self.operation.attributes:
      return None
    return self.operation.attributes["channel_handle"]

  @channel_handle.setter
  def channel_handle(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["channel_handle"] = value
    elif "channel_handle" in self.operation.attributes:
      del self.operation.attributes["channel_handle"]

  @channel_handle.deleter
  def channel_handle(self):
    del self.operation.attributes["channel_handle"]

  @builtins.property
  def use_global_device_ids(self) -> bool:
    return "use_global_device_ids" in self.operation.attributes

  @use_global_device_ids.setter
  def use_global_device_ids(self, value):
    if bool(value):
      self.operation.attributes["use_global_device_ids"] = _ods_ir.UnitAttr.get()
    elif "use_global_device_ids" in self.operation.attributes:
      del self.operation.attributes["use_global_device_ids"]

  @use_global_device_ids.deleter
  def use_global_device_ids(self):
    del self.operation.attributes["use_global_device_ids"]

def all_gather(result, operands_, all_gather_dim, replica_groups, *, channel_handle=None, use_global_device_ids=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, AllGatherOp]:
  op = AllGatherOp(result=result, operands_=operands_, all_gather_dim=all_gather_dim, replica_groups=replica_groups, channel_handle=channel_handle, use_global_device_ids=use_global_device_ids, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
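
# A sketch for `all_gather`, which takes explicit result types, a variadic
# operand list, and attributes (same assumptions and imports as the `abs`
# sketch above, plus numpy; `operand0` and `operand1` stand for existing
# `ir.Value`s of type tensor<2x2xi64>):
#
#   import numpy as np
#
#   i64 = ir.IntegerType.get_signless(64)
#   result_ty = ir.RankedTensorType.get([2, 4], i64)
#   replica_groups = ir.DenseElementsAttr.get(
#       np.asarray([[0, 1]], dtype=np.int64))  # dense<[[0, 1]]> : tensor<1x2xi64>
#   # Two result types, so the wrapper returns an OpResultList.
#   gathered = stablehlo.all_gather(
#       [result_ty, result_ty], [operand0, operand1],
#       all_gather_dim=1, replica_groups=replica_groups)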

@_ods_cext.register_operation(_Dialect)
class AllReduceOp(_ods_ir.OpView):
  r"""
  Within each process group in the process grid, applies a reduction function
  `computation` to the values of the `operand` tensor from each process and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#all_reduce
  
  Example:
  ```mlir
  %result:2 = "stablehlo.all_reduce"(%operand0, %operand0) ({
    ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>):
    %0 = "stablehlo.add"(%arg0, %arg1) : (tensor<i64>, tensor<i64>) -> tensor<i64>
    "stablehlo.return"(%0) : (tensor<i64>) -> ()
  }) {
    replica_groups = dense<[[0, 1]]> : tensor<1x2xi64>,
    channel_handle = #stablehlo.channel_handle<handle = 0, type = 0>
  } : (tensor<4xi64>, tensor<4xi64>) -> (tensor<4xi64>, tensor<4xi64>)
  ```
  """

  OPERATION_NAME = "stablehlo.all_reduce"

  _ODS_REGIONS = (1, True)

  def __init__(self, result, operands_, replica_groups, *, channel_handle=None, use_global_device_ids=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(operands_))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["replica_groups"] = (replica_groups if (
    isinstance(replica_groups, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
      _ods_ir.AttrBuilder.get('I64ElementsAttr')(replica_groups, context=_ods_context))
    if channel_handle is not None: attributes["channel_handle"] = (channel_handle if (
        isinstance(channel_handle, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ChannelHandle')) else
          _ods_ir.AttrBuilder.get('StableHLO_ChannelHandle')(channel_handle, context=_ods_context))
    if bool(use_global_device_ids): attributes["use_global_device_ids"] = _ods_ir.UnitAttr.get(
      _ods_get_default_loc_context(loc))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operands_(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def replica_groups(self) -> _ods_ir.DenseIntElementsAttr:
    return self.operation.attributes["replica_groups"]

  @replica_groups.setter
  def replica_groups(self, value: _ods_ir.DenseIntElementsAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["replica_groups"] = value

  @builtins.property
  def channel_handle(self) -> _Optional[_ods_ir.Attribute]:
    if "channel_handle" not in self.operation.attributes:
      return None
    return self.operation.attributes["channel_handle"]

  @channel_handle.setter
  def channel_handle(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["channel_handle"] = value
    elif "channel_handle" in self.operation.attributes:
      del self.operation.attributes["channel_handle"]

  @channel_handle.deleter
  def channel_handle(self):
    del self.operation.attributes["channel_handle"]

  @builtins.property
  def use_global_device_ids(self) -> bool:
    return "use_global_device_ids" in self.operation.attributes

  @use_global_device_ids.setter
  def use_global_device_ids(self, value):
    if bool(value):
      self.operation.attributes["use_global_device_ids"] = _ods_ir.UnitAttr.get()
    elif "use_global_device_ids" in self.operation.attributes:
      del self.operation.attributes["use_global_device_ids"]

  @use_global_device_ids.deleter
  def use_global_device_ids(self):
    del self.operation.attributes["use_global_device_ids"]

  @builtins.property
  def computation(self) -> _ods_ir.Region:
    return self.regions[0]

def all_reduce(result, operands_, replica_groups, *, channel_handle=None, use_global_device_ids=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, AllReduceOp]:
  op = AllReduceOp(result=result, operands_=operands_, replica_groups=replica_groups, channel_handle=channel_handle, use_global_device_ids=use_global_device_ids, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
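
# `all_reduce` additionally carries a reduction region (`computation`); a
# sketch of populating it via the op class (same assumptions as above;
# `stablehlo.return_` is assumed to be the generated wrapper for
# `stablehlo.return` defined later in this module, and `operand0` is a
# tensor<4xi64> value):
#
#   t4 = ir.RankedTensorType.get([4], i64)
#   scalar_ty = ir.RankedTensorType.get([], i64)
#   op = stablehlo.AllReduceOp([t4], [operand0], replica_groups=replica_groups)
#   block = op.computation.blocks.append(scalar_ty, scalar_ty)
#   with ir.InsertionPoint(block):
#     summed = stablehlo.add(block.arguments[0], block.arguments[1])
#     stablehlo.return_([summed])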

@_ods_cext.register_operation(_Dialect)
class AllToAllOp(_ods_ir.OpView):
  r"""
  Within each process group in the process grid, splits the values of the
  `operand` tensor along `split_dimension` into parts, scatters the split parts
  between the processes, concatenates the scattered parts along `concat_dimension`
  and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#all_to_all
  
  Example:
  ```mlir
  %result:2 = "stablehlo.all_to_all"(%operand1, %operand2) {
    split_dimension = 1 : i64,
    concat_dimension = 0 : i64,
    split_count = 2 : i64,
    replica_groups = dense<[[0, 1]]> : tensor<1x2xi64>
  } : (tensor<2x4xi64>, tensor<2x4xi64>) -> (tensor<4x2xi64>, tensor<4x2xi64>)
  ```
  """

  OPERATION_NAME = "stablehlo.all_to_all"

  _ODS_REGIONS = (0, True)

  def __init__(self, operands_, split_dimension, concat_dimension, split_count, replica_groups, *, channel_handle=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(operands_))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["split_dimension"] = (split_dimension if (
    isinstance(split_dimension, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(split_dimension, context=_ods_context))
    attributes["concat_dimension"] = (concat_dimension if (
    isinstance(concat_dimension, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(concat_dimension, context=_ods_context))
    attributes["split_count"] = (split_count if (
    isinstance(split_count, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(split_count, context=_ods_context))
    attributes["replica_groups"] = (replica_groups if (
    isinstance(replica_groups, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
      _ods_ir.AttrBuilder.get('I64ElementsAttr')(replica_groups, context=_ods_context))
    if channel_handle is not None: attributes["channel_handle"] = (channel_handle if (
        isinstance(channel_handle, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ChannelHandle')) else
          _ods_ir.AttrBuilder.get('StableHLO_ChannelHandle')(channel_handle, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operands_(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def split_dimension(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["split_dimension"]

  @split_dimension.setter
  def split_dimension(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["split_dimension"] = value

  @builtins.property
  def concat_dimension(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["concat_dimension"]

  @concat_dimension.setter
  def concat_dimension(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["concat_dimension"] = value

  @builtins.property
  def split_count(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["split_count"]

  @split_count.setter
  def split_count(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["split_count"] = value

  @builtins.property
  def replica_groups(self) -> _ods_ir.DenseIntElementsAttr:
    return self.operation.attributes["replica_groups"]

  @replica_groups.setter
  def replica_groups(self, value: _ods_ir.DenseIntElementsAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["replica_groups"] = value

  @builtins.property
  def channel_handle(self) -> _Optional[_ods_ir.Attribute]:
    if "channel_handle" not in self.operation.attributes:
      return None
    return self.operation.attributes["channel_handle"]

  @channel_handle.setter
  def channel_handle(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["channel_handle"] = value
    elif "channel_handle" in self.operation.attributes:
      del self.operation.attributes["channel_handle"]

  @channel_handle.deleter
  def channel_handle(self):
    del self.operation.attributes["channel_handle"]

def all_to_all(operands_, split_dimension, concat_dimension, split_count, replica_groups, *, channel_handle=None, results=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, AllToAllOp]:
  op = AllToAllOp(operands_=operands_, split_dimension=split_dimension, concat_dimension=concat_dimension, split_count=split_count, replica_groups=replica_groups, channel_handle=channel_handle, results=results, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)

@_ods_cext.register_operation(_Dialect)
class AndOp(_ods_ir.OpView):
  r"""
  Performs element-wise AND of two tensors `lhs` and `rhs` and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#and
  
  Example:
  ```mlir
  %result = stablehlo.and %lhs, %rhs : tensor<2x2xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.and"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def and_(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return AndOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class Atan2Op(_ods_ir.OpView):
  r"""
  Performs element-wise atan2 operation on `lhs` and `rhs` tensors and produces
  a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#atan2
  
  Example:
  ```mlir
  %result = stablehlo.atan2 %lhs, %rhs : tensor<3xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.atan2"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def atan2(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return Atan2Op(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class BatchNormGradOp(_ods_ir.OpView):
  r"""
  Computes gradients of several inputs of BatchNormTrainingOp backpropagating
  from `grad_output`, and produces `grad_operand`, `grad_scale` and
  `grad_offset` tensors.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#batch_norm_grad
  
  Example:
  ```mlir
  %grad_operand, %grad_scale, %grad_offset =
  "stablehlo.batch_norm_grad"(%operand, %scale, %mean, %variance, %grad_output) {
    epsilon = 0.0 : f32,
    feature_index = 2 : i64
  } : (tensor<2x2x2xf64>, tensor<2xf64>, tensor<2xf64>, tensor<2xf64>,
       tensor<2x2x2xf64>) -> (tensor<2x2x2xf64>, tensor<2xf64>, tensor<2xf64>)
  ```
  """

  OPERATION_NAME = "stablehlo.batch_norm_grad"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, scale, mean, variance, grad_output, epsilon, feature_index, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(scale)
    operands.append(mean)
    operands.append(variance)
    operands.append(grad_output)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["epsilon"] = (epsilon if (
    isinstance(epsilon, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('F32Attr')) else
      _ods_ir.AttrBuilder.get('F32Attr')(epsilon, context=_ods_context))
    attributes["feature_index"] = (feature_index if (
    isinstance(feature_index, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(feature_index, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def scale(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def mean(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def variance(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[3]

  @builtins.property
  def grad_output(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[4]

  @builtins.property
  def epsilon(self) -> _ods_ir.FloatAttr:
    return self.operation.attributes["epsilon"]

  @epsilon.setter
  def epsilon(self, value: _ods_ir.FloatAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["epsilon"] = value

  @builtins.property
  def feature_index(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["feature_index"]

  @feature_index.setter
  def feature_index(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["feature_index"] = value

  @builtins.property
  def grad_operand(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

  @builtins.property
  def grad_scale(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[1]

  @builtins.property
  def grad_offset(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[2]

def batch_norm_grad(operand, scale, mean, variance, grad_output, epsilon, feature_index, *, results=None, loc=None, ip=None) -> _ods_ir.OpResultList:
  return BatchNormGradOp(operand=operand, scale=scale, mean=mean, variance=variance, grad_output=grad_output, epsilon=epsilon, feature_index=feature_index, results=results, loc=loc, ip=ip).results

@_ods_cext.register_operation(_Dialect)
class BatchNormInferenceOp(_ods_ir.OpView):
  r"""
  Normalizes the `operand` tensor across all dimensions except for the
  `feature_index` dimension and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#batch_norm_inference
  
  Example:
  ```mlir
  %result = "stablehlo.batch_norm_inference"(%operand, %scale, %offset, %mean, %variance) {
    epsilon = 0.0 : f32,
    feature_index = 2 : i64
  } : (tensor<2x2x2xf64>, tensor<2xf64>, tensor<2xf64>, tensor<2xf64>, tensor<2xf64>) -> tensor<2x2x2xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.batch_norm_inference"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, scale, offset, mean, variance, epsilon, feature_index, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(scale)
    operands.append(offset)
    operands.append(mean)
    operands.append(variance)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["epsilon"] = (epsilon if (
    isinstance(epsilon, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('F32Attr')) else
      _ods_ir.AttrBuilder.get('F32Attr')(epsilon, context=_ods_context))
    attributes["feature_index"] = (feature_index if (
    isinstance(feature_index, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(feature_index, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def scale(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def offset(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def mean(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[3]

  @builtins.property
  def variance(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[4]

  @builtins.property
  def epsilon(self) -> _ods_ir.FloatAttr:
    return self.operation.attributes["epsilon"]

  @epsilon.setter
  def epsilon(self, value: _ods_ir.FloatAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["epsilon"] = value

  @builtins.property
  def feature_index(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["feature_index"]

  @feature_index.setter
  def feature_index(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["feature_index"] = value

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def batch_norm_inference(operand, scale, offset, mean, variance, epsilon, feature_index, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return BatchNormInferenceOp(operand=operand, scale=scale, offset=offset, mean=mean, variance=variance, epsilon=epsilon, feature_index=feature_index, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class BatchNormTrainingOp(_ods_ir.OpView):
  r"""
  Computes mean and variance across batch and spatial dimensions and
  normalizes the `operand` tensor, for each feature in the `feature_index`
  dimension and produces `output`, `batch_mean` and `batch_var` tensors.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#batch_norm_training
  
  Example:
  ```mlir
  %output, %batch_mean, %batch_var = "stablehlo.batch_norm_training"(%operand, %scale, %offset) {
    epsilon = 0.0 : f32,
    feature_index = 2 : i64
  } : (tensor<2x2x2xf64>, tensor<2xf64>, tensor<2xf64>) ->
      (tensor<2x2x2xf64>, tensor<2xf64>, tensor<2xf64>)
  ```
  """

  OPERATION_NAME = "stablehlo.batch_norm_training"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, scale, offset, epsilon, feature_index, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(scale)
    operands.append(offset)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["epsilon"] = (epsilon if (
    isinstance(epsilon, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('F32Attr')) else
      _ods_ir.AttrBuilder.get('F32Attr')(epsilon, context=_ods_context))
    attributes["feature_index"] = (feature_index if (
    isinstance(feature_index, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(feature_index, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def scale(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def offset(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def epsilon(self) -> _ods_ir.FloatAttr:
    return self.operation.attributes["epsilon"]

  @epsilon.setter
  def epsilon(self, value: _ods_ir.FloatAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["epsilon"] = value

  @builtins.property
  def feature_index(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["feature_index"]

  @feature_index.setter
  def feature_index(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["feature_index"] = value

  @builtins.property
  def output(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

  @builtins.property
  def batch_mean(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[1]

  @builtins.property
  def batch_var(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[2]

def batch_norm_training(operand, scale, offset, epsilon, feature_index, *, results=None, loc=None, ip=None) -> _ods_ir.OpResultList:
  return BatchNormTrainingOp(operand=operand, scale=scale, offset=offset, epsilon=epsilon, feature_index=feature_index, results=results, loc=loc, ip=ip).results
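
# The batch-norm wrappers return an `OpResultList` because these ops have
# several results; it unpacks like a tuple. Plain Python numbers go through
# the registered `F32Attr`/`I64Attr` builders (same assumptions as the
# sketches above; `operand`, `scale`, `offset` stand for values of the types
# shown in the docstring):
#
#   output, batch_mean, batch_var = stablehlo.batch_norm_training(
#       operand, scale, offset, epsilon=0.0, feature_index=2)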

@_ods_cext.register_operation(_Dialect)
class BitcastConvertOp(_ods_ir.OpView):
  r"""
  Performs a bitcast operation on `operand` tensor and produces a `result`
  tensor where the bits of the entire `operand` tensor are reinterpreted using
  the type of the `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#bitcast_convert
  
  Example:
  ```mlir
  %result = stablehlo.bitcast_convert %operand : (tensor<f64>) -> tensor<4xf16>
  ```
  """

  OPERATION_NAME = "stablehlo.bitcast_convert"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

def bitcast_convert(result, operand, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return BitcastConvertOp(result=result, operand=operand, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class BroadcastInDimOp(_ods_ir.OpView):
  r"""
  Expands the dimensions and/or rank of an input tensor by duplicating the
  data in the `operand` tensor and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#broadcast_in_dim
  
  Example:
  ```mlir
  %result = stablehlo.broadcast_in_dim %operand, dims = [2, 1] : (tensor<1x3xi32>) -> tensor<2x3x2xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.broadcast_in_dim"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, broadcast_dimensions, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["broadcast_dimensions"] = (broadcast_dimensions if (
    isinstance(broadcast_dimensions, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(broadcast_dimensions, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def broadcast_dimensions(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["broadcast_dimensions"]

  @broadcast_dimensions.setter
  def broadcast_dimensions(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["broadcast_dimensions"] = value

def broadcast_in_dim(result, operand, broadcast_dimensions, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return BroadcastInDimOp(result=result, operand=operand, broadcast_dimensions=broadcast_dimensions, loc=loc, ip=ip).result
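
# `broadcast_in_dim` takes the result type explicitly as its first argument,
# and `broadcast_dimensions` is a DenseI64ArrayAttr that the registered
# builder accepts as a plain Python list (mirroring the MLIR example in the
# docstring; same assumptions as above, with `operand` a tensor<1x3xi32>
# value):
#
#   result_ty = ir.RankedTensorType.get([2, 3, 2], i32)
#   broadcast = stablehlo.broadcast_in_dim(result_ty, operand, [2, 1])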

@_ods_cext.register_operation(_Dialect)
class BroadcastOp(_ods_ir.OpView):
  r"""
  This operation is on its way out of StableHLO, so it is not included in
  the StableHLO specification: https://github.com/openxla/stablehlo/issues/3.
  
  Informally, this operation does the same thing as XLA's Broadcast:
  https://www.tensorflow.org/xla/operation_semantics#broadcast
  
  Example:
  ```mlir
  %result = stablehlo.broadcast %operand, sizes = [1, 2] : (tensor<3xi32>) -> tensor<1x2x3xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.broadcast"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, broadcast_sizes, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["broadcast_sizes"] = (broadcast_sizes if (
    isinstance(broadcast_sizes, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(broadcast_sizes, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def broadcast_sizes(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["broadcast_sizes"]

  @broadcast_sizes.setter
  def broadcast_sizes(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["broadcast_sizes"] = value

def broadcast(operand, broadcast_sizes, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return BroadcastOp(operand=operand, broadcast_sizes=broadcast_sizes, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CaseOp(_ods_ir.OpView):
  r"""
  Produces the output from executing exactly one `function` from `branches`
  depending on the value of `index`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#case
  
  Example:
  ```mlir
  %result0, %result1 = "stablehlo.case"(%index) ({
    stablehlo.return %result_branch0, %result_branch0 : tensor<2xi64>, tensor<2xi64>
  }, {
    stablehlo.return %result_branch1, %result_branch1 : tensor<2xi64>, tensor<2xi64>
  }) : (tensor<i32>) -> (tensor<2xi64>, tensor<2xi64>)
  ```
  """

  OPERATION_NAME = "stablehlo.case"

  _ODS_REGIONS = (0, False)

  def __init__(self, result, index, num_branches, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(index)
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.extend(result)
    _ods_successors = None
    regions = 0 + num_branches
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def index(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def branches(self) -> _ods_ir.RegionSequence:
    return self.regions[0:]

def case(result, index, num_branches, *, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, CaseOp]:
  op = CaseOp(result=result, index=index, num_branches=num_branches, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
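
# `case` creates `num_branches` empty regions; each one needs a block
# terminated by `stablehlo.return` (hedged sketch under the same assumptions
# as above; `index` is a tensor<i32> value and `branch_value` a tensor<2xi64>
# value visible in the enclosing scope):
#
#   case_op = stablehlo.CaseOp([ir.RankedTensorType.get([2], i64)],
#                              index, num_branches=2)
#   for region in case_op.branches:
#     block = region.blocks.append()
#     with ir.InsertionPoint(block):
#       stablehlo.return_([branch_value])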

@_ods_cext.register_operation(_Dialect)
class CbrtOp(_ods_ir.OpView):
  r"""
  Performs element-wise cubic root operation on `operand` tensor and produces
  a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#cbrt
  
  Example:
  ```mlir
  %result = stablehlo.cbrt %operand : tensor<4xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.cbrt"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def cbrt(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return CbrtOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CeilOp(_ods_ir.OpView):
  r"""
  Performs element-wise ceil of `operand` tensor and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#ceil
  
  Example:
  ```mlir
  %result = stablehlo.ceil %operand : tensor<5xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.ceil"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def ceil(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return CeilOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CholeskyOp(_ods_ir.OpView):
  r"""
  Computes the Cholesky decomposition of a batch of matrices.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#cholesky
  
  Example:
  ```mlir
  %result = stablehlo.cholesky %a, lower = true : tensor<3x3xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.cholesky"

  _ODS_REGIONS = (0, True)

  def __init__(self, a, *, lower=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(a)
    _ods_context = _ods_get_default_loc_context(loc)
    if lower is not None: attributes["lower"] = (lower if (
        isinstance(lower, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('BoolAttr')) else
          _ods_ir.AttrBuilder.get('BoolAttr')(lower, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def a(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def lower(self) -> _Optional[_ods_ir.BoolAttr]:
    if "lower" not in self.operation.attributes:
      return None
    return self.operation.attributes["lower"]

  @lower.setter
  def lower(self, value: _Optional[_ods_ir.BoolAttr]):
    if value is not None:
      self.operation.attributes["lower"] = value
    elif "lower" in self.operation.attributes:
      del self.operation.attributes["lower"]

  @lower.deleter
  def lower(self):
    del self.operation.attributes["lower"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def cholesky(a, *, lower=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return CholeskyOp(a=a, lower=lower, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ClampOp(_ods_ir.OpView):
  r"""
  Clamps every element of the `operand` tensor between a minimum and maximum
  value and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#clamp
  
  Example:
  ```mlir
  %result = stablehlo.clamp %min, %operand, %max : tensor<3xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.clamp"

  _ODS_REGIONS = (0, True)

  def __init__(self, min, operand, max, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(min)
    operands.append(operand)
    operands.append(max)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def min(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def max(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def clamp(min, operand, max, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ClampOp(min=min, operand=operand, max=max, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ClzOp(_ods_ir.OpView):
  r"""
  Performs element-wise count of the number of leading zero bits in the
  `operand` tensor and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#count_leading_zeros
  
  Example:
  ```mlir
  %result = stablehlo.count_leading_zeros %operand : tensor<2x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.count_leading_zeros"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def count_leading_zeros(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ClzOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CollectiveBroadcastOp(_ods_ir.OpView):
  r"""
  Within each process group in the process grid, sends the value of the
  `operand` tensor from the source process to the target processes and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#collective_broadcast
  
  Example:
  ```mlir
  %result = "stablehlo.collective_broadcast"(%operand) {
    replica_groups = dense<[[0, 1]]> : tensor<1x2xi64>,
    channel_handle = #stablehlo.channel_handle<handle = 0, type = 0>
  } : (tensor<1x2xi64>) -> tensor<1x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.collective_broadcast"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, replica_groups, *, channel_handle=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["replica_groups"] = (replica_groups if (
    isinstance(replica_groups, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
      _ods_ir.AttrBuilder.get('I64ElementsAttr')(replica_groups, context=_ods_context))
    if channel_handle is not None: attributes["channel_handle"] = (channel_handle if (
        isinstance(channel_handle, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ChannelHandle')) else
          _ods_ir.AttrBuilder.get('StableHLO_ChannelHandle')(channel_handle, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def replica_groups(self) -> _ods_ir.DenseIntElementsAttr:
    return self.operation.attributes["replica_groups"]

  @replica_groups.setter
  def replica_groups(self, value: _ods_ir.DenseIntElementsAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["replica_groups"] = value

  @builtins.property
  def channel_handle(self) -> _Optional[_ods_ir.Attribute]:
    if "channel_handle" not in self.operation.attributes:
      return None
    return self.operation.attributes["channel_handle"]

  @channel_handle.setter
  def channel_handle(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["channel_handle"] = value
    elif "channel_handle" in self.operation.attributes:
      del self.operation.attributes["channel_handle"]

  @channel_handle.deleter
  def channel_handle(self):
    del self.operation.attributes["channel_handle"]

def collective_broadcast(operand, replica_groups, *, channel_handle=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return CollectiveBroadcastOp(operand=operand, replica_groups=replica_groups, channel_handle=channel_handle, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CollectivePermuteOp(_ods_ir.OpView):
  r"""
  Within each process group in the process grid, sends the value of the
  `operand` tensor from the source process to the target process and produces
  a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#collective_permute
  
  Example:
  ```mlir
  %result = "stablehlo.collective_permute"(%operand) {
    source_target_pairs = dense<[[0, 1], [1, 2]]> : tensor<2x2xi64>,
    channel_handle = #stablehlo.channel_handle<handle = 0, type = 0>
  } : (tensor<2x2xi64>) -> tensor<2x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.collective_permute"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, source_target_pairs, *, channel_handle=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["source_target_pairs"] = (source_target_pairs if (
    isinstance(source_target_pairs, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
      _ods_ir.AttrBuilder.get('I64ElementsAttr')(source_target_pairs, context=_ods_context))
    if channel_handle is not None: attributes["channel_handle"] = (channel_handle if (
        isinstance(channel_handle, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ChannelHandle')) else
          _ods_ir.AttrBuilder.get('StableHLO_ChannelHandle')(channel_handle, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def source_target_pairs(self) -> _ods_ir.DenseIntElementsAttr:
    return self.operation.attributes["source_target_pairs"]

  @source_target_pairs.setter
  def source_target_pairs(self, value: _ods_ir.DenseIntElementsAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["source_target_pairs"] = value

  @builtins.property
  def channel_handle(self) -> _Optional[_ods_ir.Attribute]:
    if "channel_handle" not in self.operation.attributes:
      return None
    return self.operation.attributes["channel_handle"]

  @channel_handle.setter
  def channel_handle(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["channel_handle"] = value
    elif "channel_handle" in self.operation.attributes:
      del self.operation.attributes["channel_handle"]

  @channel_handle.deleter
  def channel_handle(self):
    del self.operation.attributes["channel_handle"]

def collective_permute(operand, source_target_pairs, *, channel_handle=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return CollectivePermuteOp(operand=operand, source_target_pairs=source_target_pairs, channel_handle=channel_handle, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CompareOp(_ods_ir.OpView):
  r"""
  Performs element-wise comparison of `lhs` and `rhs` tensors according to
  `comparison_direction` and `compare_type`, and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#compare
  
  Example:
  ```mlir
  %result = stablehlo.compare LT, %lhs, %rhs, FLOAT : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xi1>
  ```
  """

  OPERATION_NAME = "stablehlo.compare"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, comparison_direction, *, compare_type=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["comparison_direction"] = (comparison_direction if (
    isinstance(comparison_direction, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_ComparisonDirectionAttr')) else
      _ods_ir.AttrBuilder.get('StableHLO_ComparisonDirectionAttr')(comparison_direction, context=_ods_context))
    if compare_type is not None: attributes["compare_type"] = (compare_type if (
        isinstance(compare_type, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ComparisonTypeAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ComparisonTypeAttr')(compare_type, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def comparison_direction(self) -> _ods_ir.Attribute:
    return self.operation.attributes["comparison_direction"]

  @comparison_direction.setter
  def comparison_direction(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["comparison_direction"] = value

  @builtins.property
  def compare_type(self) -> _Optional[_ods_ir.Attribute]:
    if "compare_type" not in self.operation.attributes:
      return None
    return self.operation.attributes["compare_type"]

  @compare_type.setter
  def compare_type(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["compare_type"] = value
    elif "compare_type" in self.operation.attributes:
      del self.operation.attributes["compare_type"]

  @compare_type.deleter
  def compare_type(self):
    del self.operation.attributes["compare_type"]

def compare(lhs, rhs, comparison_direction, *, compare_type=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return CompareOp(lhs=lhs, rhs=rhs, comparison_direction=comparison_direction, compare_type=compare_type, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ComplexOp(_ods_ir.OpView):
  r"""
  Performs element-wise conversion to a complex value from a pair of real and
  imaginary values, `lhs` and `rhs`, and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#complex
  
  Example:
  ```mlir
  %result = stablehlo.complex %lhs, %rhs : tensor<2xcomplex<f64>>
  ```
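  
  A minimal Python builder sketch, assuming `re` and `im` are `ir.Value`s of
  type `tensor<2xf64>` built under the setup shown in the `stablehlo.compare`
  sketch above:
  ```python
  # The result type (tensor<2xcomplex<f64>>) is inferred.
  z = stablehlo.complex(re, im)
  ```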
  """

  OPERATION_NAME = "stablehlo.complex"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def complex(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ComplexOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CompositeOp(_ods_ir.OpView):
  r"""
  Encapsulates an operation made up (composed) of other StableHLO operations,
  taking `inputs` and `composite_attributes` and producing `results`. The
  semantics of the op are implemented by the `decomposition` attribute. The
  `composite` op can be replaced with its decomposition without changing program
  semantics. In cases where inlining the decomposition does not provide the same
  op semantics, prefer using `custom_call`.
  
  The `version` field (defaults to `0`) is used to denote when a composite's
  semantics change.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#composite
  
  Example:
  ```mlir
  %results = stablehlo.composite "my.op" %input0, %input1 {
    composite_attributes = {
      my_attribute = "my_value"
    },
    decomposition = @my_op,
    version = 1 : i32
  } : (tensor<f32>, tensor<f32>) -> tensor<f32>
  ```
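  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above, `input0`/`input1` of type `tensor<f32>`,
  and a function `@my_op` with a matching signature in the enclosing module:
  ```python
  scalar_f32 = ir.RankedTensorType.get([], ir.F32Type.get())
  out = stablehlo.composite(
      [scalar_f32], [input0, input1], "my.op",
      ir.FlatSymbolRefAttr.get("my_op"),
      composite_attributes=ir.DictAttr.get(
          {"my_attribute": ir.StringAttr.get("my_value")}),
      version=1)  # plain ints are converted to the i32 `version` attribute
  ```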
  """

  OPERATION_NAME = "stablehlo.composite"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, inputs, name, decomposition, *, composite_attributes=None, version=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["name"] = (name if (
    isinstance(name, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StrAttr')) else
      _ods_ir.AttrBuilder.get('StrAttr')(name, context=_ods_context))
    if composite_attributes is not None: attributes["composite_attributes"] = (composite_attributes if (
        isinstance(composite_attributes, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DictionaryAttr')) else
          _ods_ir.AttrBuilder.get('DictionaryAttr')(composite_attributes, context=_ods_context))
    attributes["decomposition"] = (decomposition if (
    isinstance(decomposition, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('FlatSymbolRefAttr')) else
      _ods_ir.AttrBuilder.get('FlatSymbolRefAttr')(decomposition, context=_ods_context))
    if version is not None: attributes["version"] = (version if (
        isinstance(version, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('I32Attr')) else
          _ods_ir.AttrBuilder.get('I32Attr')(version, context=_ods_context))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def name(self) -> _ods_ir.StringAttr:
    return self.operation.attributes["name"]

  @name.setter
  def name(self, value: _ods_ir.StringAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["name"] = value

  @builtins.property
  def composite_attributes(self) -> _Optional[_ods_ir.DictAttr]:
    if "composite_attributes" not in self.operation.attributes:
      return None
    return self.operation.attributes["composite_attributes"]

  @composite_attributes.setter
  def composite_attributes(self, value: _Optional[_ods_ir.DictAttr]):
    if value is not None:
      self.operation.attributes["composite_attributes"] = value
    elif "composite_attributes" in self.operation.attributes:
      del self.operation.attributes["composite_attributes"]

  @composite_attributes.deleter
  def composite_attributes(self):
    del self.operation.attributes["composite_attributes"]

  @builtins.property
  def decomposition(self) -> _ods_ir.FlatSymbolRefAttr:
    return self.operation.attributes["decomposition"]

  @decomposition.setter
  def decomposition(self, value: _ods_ir.FlatSymbolRefAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["decomposition"] = value

  @builtins.property
  def version(self) -> _Optional[_ods_ir.IntegerAttr]:
    if "version" not in self.operation.attributes:
      return None
    return self.operation.attributes["version"]

  @version.setter
  def version(self, value: _Optional[_ods_ir.IntegerAttr]):
    if value is not None:
      self.operation.attributes["version"] = value
    elif "version" in self.operation.attributes:
      del self.operation.attributes["version"]

  @version.deleter
  def version(self):
    del self.operation.attributes["version"]

def composite(result, inputs, name, decomposition, *, composite_attributes=None, version=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, CompositeOp]:
  op = CompositeOp(result=result, inputs=inputs, name=name, decomposition=decomposition, composite_attributes=composite_attributes, version=version, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)

@_ods_cext.register_operation(_Dialect)
class ConcatenateOp(_ods_ir.OpView):
  r"""
  Concatenates a variadic number of tensors in `inputs` along the `dimension`
  dimension, in the same order as the given arguments, and produces a `result`
  tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#concatenate
  
  Example:
  ```mlir
  %result = stablehlo.concatenate %input0, %input1, dim = 0 : (tensor<3x2xi64>, tensor<1x2xi64>) -> tensor<4x2xi64>
  ```
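  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above with `a : tensor<3x2xi64>` and
  `b : tensor<1x2xi64>`:
  ```python
  # The result type (tensor<4x2xi64>) is inferred; the plain `0` is converted
  # to the i64 `dimension` attribute.
  cat = stablehlo.concatenate([a, b], 0)
  ```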
  """

  OPERATION_NAME = "stablehlo.concatenate"

  _ODS_REGIONS = (0, True)

  def __init__(self, inputs, dimension, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dimension"] = (dimension if (
    isinstance(dimension, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(dimension, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def dimension(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["dimension"]

  @dimension.setter
  def dimension(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimension"] = value

def concatenate(inputs, dimension, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ConcatenateOp(inputs=inputs, dimension=dimension, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ConstantOp(_ods_ir.OpView):
  r"""
  Produces an `output` tensor from a constant `value`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#constant
  
  Example:
  ```mlir
  %output = stablehlo.constant dense<[[0.0, 1.0], [2.0, 3.0]]> : tensor<2x2xf32>
  ```
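  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above and that NumPy is available:
  ```python
  import numpy as np

  value = ir.DenseElementsAttr.get(
      np.array([[0.0, 1.0], [2.0, 3.0]], dtype=np.float32))
  cst = stablehlo.constant(value)  # result type follows the attribute type
  ```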
  """

  OPERATION_NAME = "stablehlo.constant"

  _ODS_REGIONS = (0, True)

  def __init__(self, value, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["value"] = (value if (
    isinstance(value, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('ElementsAttr')) else
      _ods_ir.AttrBuilder.get('ElementsAttr')(value, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def value(self) -> _ods_ir.Attribute:
    return self.operation.attributes["value"]

  @value.setter
  def value(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["value"] = value

  @builtins.property
  def output(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def constant(value, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ConstantOp(value=value, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ConvertOp(_ods_ir.OpView):
  r"""
  Performs an element-wise conversion from one element type to another on
  `operand` tensor and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#convert
  
  Example:
  ```mlir
  %result = stablehlo.convert %operand : (tensor<3xi64>) -> tensor<3xcomplex<f64>>
  ```
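  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above with `x : tensor<3xi64>`; the result type
  must be passed explicitly:
  ```python
  new_ty = ir.RankedTensorType.get([3], ir.ComplexType.get(ir.F64Type.get()))
  y = stablehlo.convert(new_ty, x)
  ```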
  """

  OPERATION_NAME = "stablehlo.convert"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def convert(result, operand, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return ConvertOp(result=result, operand=operand, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ConvolutionOp(_ods_ir.OpView):
  r"""
  Computes dot products between windows of `lhs` and slices of `rhs` and
  produces `result`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#convolution
  
  Example:
  ```mlir
  %result = stablehlo.convolution(%lhs, %rhs)
    dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f],
    window = {
      stride = [4, 4],
      pad = [[0, 0], [0, 0]],
      lhs_dilate = [2, 2],
      rhs_dilate = [1, 1],
      reverse = [0, 0]
    } {
      feature_group_count = 1 : i64,
      batch_group_count = 1 : i64,
      precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]
    } :
  (tensor<1x4x4x1xi64>, tensor<3x3x1x1xi64>) -> tensor<1x2x2x1xi64>
  ```
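  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above with `lhs : tensor<1x4x4x1xi64>` and
  `rhs : tensor<3x3x1x1xi64>`; the keyword names of
  `ConvDimensionNumbers.get` are an assumption about the StableHLO attribute
  bindings and may differ in your distribution:
  ```python
  dnums = stablehlo.ConvDimensionNumbers.get(
      input_batch_dimension=0, input_feature_dimension=3,
      input_spatial_dimensions=[1, 2],
      kernel_input_feature_dimension=2, kernel_output_feature_dimension=3,
      kernel_spatial_dimensions=[0, 1],
      output_batch_dimension=0, output_feature_dimension=3,
      output_spatial_dimensions=[1, 2])
  i64 = ir.IntegerType.get_signless(64)
  out_ty = ir.RankedTensorType.get([1, 2, 2, 1], i64)
  conv = stablehlo.convolution(
      out_ty, lhs, rhs, dnums,
      feature_group_count=1, batch_group_count=1,
      window_strides=[4, 4], lhs_dilation=[2, 2], rhs_dilation=[1, 1])
  ```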
  """

  OPERATION_NAME = "stablehlo.convolution"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, lhs, rhs, dimension_numbers, feature_group_count, batch_group_count, *, window_strides=None, padding=None, lhs_dilation=None, rhs_dilation=None, window_reversal=None, precision_config=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    if window_strides is not None: attributes["window_strides"] = (window_strides if (
        isinstance(window_strides, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(window_strides, context=_ods_context))
    if padding is not None: attributes["padding"] = (padding if (
        isinstance(padding, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
          _ods_ir.AttrBuilder.get('I64ElementsAttr')(padding, context=_ods_context))
    if lhs_dilation is not None: attributes["lhs_dilation"] = (lhs_dilation if (
        isinstance(lhs_dilation, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(lhs_dilation, context=_ods_context))
    if rhs_dilation is not None: attributes["rhs_dilation"] = (rhs_dilation if (
        isinstance(rhs_dilation, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(rhs_dilation, context=_ods_context))
    if window_reversal is not None: attributes["window_reversal"] = (window_reversal if (
        isinstance(window_reversal, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseBoolArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseBoolArrayAttr')(window_reversal, context=_ods_context))
    attributes["dimension_numbers"] = (dimension_numbers if (
    isinstance(dimension_numbers, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_ConvDimensionNumbers')) else
      _ods_ir.AttrBuilder.get('StableHLO_ConvDimensionNumbers')(dimension_numbers, context=_ods_context))
    attributes["feature_group_count"] = (feature_group_count if (
    isinstance(feature_group_count, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(feature_group_count, context=_ods_context))
    attributes["batch_group_count"] = (batch_group_count if (
    isinstance(batch_group_count, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(batch_group_count, context=_ods_context))
    if precision_config is not None: attributes["precision_config"] = (precision_config if (
        isinstance(precision_config, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_PrecisionConfigAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_PrecisionConfigAttr')(precision_config, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def window_strides(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "window_strides" not in self.operation.attributes:
      return None
    return self.operation.attributes["window_strides"]

  @window_strides.setter
  def window_strides(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["window_strides"] = value
    elif "window_strides" in self.operation.attributes:
      del self.operation.attributes["window_strides"]

  @window_strides.deleter
  def window_strides(self):
    del self.operation.attributes["window_strides"]

  @builtins.property
  def padding(self) -> _Optional[_ods_ir.DenseIntElementsAttr]:
    if "padding" not in self.operation.attributes:
      return None
    return self.operation.attributes["padding"]

  @padding.setter
  def padding(self, value: _Optional[_ods_ir.DenseIntElementsAttr]):
    if value is not None:
      self.operation.attributes["padding"] = value
    elif "padding" in self.operation.attributes:
      del self.operation.attributes["padding"]

  @padding.deleter
  def padding(self):
    del self.operation.attributes["padding"]

  @builtins.property
  def lhs_dilation(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "lhs_dilation" not in self.operation.attributes:
      return None
    return self.operation.attributes["lhs_dilation"]

  @lhs_dilation.setter
  def lhs_dilation(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["lhs_dilation"] = value
    elif "lhs_dilation" in self.operation.attributes:
      del self.operation.attributes["lhs_dilation"]

  @lhs_dilation.deleter
  def lhs_dilation(self):
    del self.operation.attributes["lhs_dilation"]

  @builtins.property
  def rhs_dilation(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "rhs_dilation" not in self.operation.attributes:
      return None
    return self.operation.attributes["rhs_dilation"]

  @rhs_dilation.setter
  def rhs_dilation(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["rhs_dilation"] = value
    elif "rhs_dilation" in self.operation.attributes:
      del self.operation.attributes["rhs_dilation"]

  @rhs_dilation.deleter
  def rhs_dilation(self):
    del self.operation.attributes["rhs_dilation"]

  @builtins.property
  def window_reversal(self) -> _Optional[_ods_ir.DenseBoolArrayAttr]:
    if "window_reversal" not in self.operation.attributes:
      return None
    return self.operation.attributes["window_reversal"]

  @window_reversal.setter
  def window_reversal(self, value: _Optional[_ods_ir.DenseBoolArrayAttr]):
    if value is not None:
      self.operation.attributes["window_reversal"] = value
    elif "window_reversal" in self.operation.attributes:
      del self.operation.attributes["window_reversal"]

  @window_reversal.deleter
  def window_reversal(self):
    del self.operation.attributes["window_reversal"]

  @builtins.property
  def dimension_numbers(self) -> _ods_ir.Attribute:
    return self.operation.attributes["dimension_numbers"]

  @dimension_numbers.setter
  def dimension_numbers(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimension_numbers"] = value

  @builtins.property
  def feature_group_count(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["feature_group_count"]

  @feature_group_count.setter
  def feature_group_count(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["feature_group_count"] = value

  @builtins.property
  def batch_group_count(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["batch_group_count"]

  @batch_group_count.setter
  def batch_group_count(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["batch_group_count"] = value

  @builtins.property
  def precision_config(self) -> _Optional[_ods_ir.ArrayAttr]:
    if "precision_config" not in self.operation.attributes:
      return None
    return self.operation.attributes["precision_config"]

  @precision_config.setter
  def precision_config(self, value: _Optional[_ods_ir.ArrayAttr]):
    if value is not None:
      self.operation.attributes["precision_config"] = value
    elif "precision_config" in self.operation.attributes:
      del self.operation.attributes["precision_config"]

  @precision_config.deleter
  def precision_config(self):
    del self.operation.attributes["precision_config"]

def convolution(result, lhs, rhs, dimension_numbers, feature_group_count, batch_group_count, *, window_strides=None, padding=None, lhs_dilation=None, rhs_dilation=None, window_reversal=None, precision_config=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ConvolutionOp(result=result, lhs=lhs, rhs=rhs, dimension_numbers=dimension_numbers, feature_group_count=feature_group_count, batch_group_count=batch_group_count, window_strides=window_strides, padding=padding, lhs_dilation=lhs_dilation, rhs_dilation=rhs_dilation, window_reversal=window_reversal, precision_config=precision_config, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CosineOp(_ods_ir.OpView):
  r"""
  Performs element-wise cosine operation on `operand` tensor and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#cosine
  
  Example:
  ```mlir
  %result = stablehlo.cosine %operand : tensor<2xf32>
  ```
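  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above with `x : tensor<2xf32>`:
  ```python
  y = stablehlo.cosine(x)  # result type is inferred
  ```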
  """

  OPERATION_NAME = "stablehlo.cosine"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def cosine(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return CosineOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CreateTokenOp(_ods_ir.OpView):
  r"""
  This operation is on its way out of StableHLO, so it is not included in
  the StableHLO specification: https://github.com/openxla/stablehlo/issues/3.
  
  Informally, this operation does the same thing as AfterAllOp with 0 inputs:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#after_all
  
  Example:
  ```mlir
  %output = stablehlo.create_token : !stablehlo.token
  ```
  """

  OPERATION_NAME = "stablehlo.create_token"

  _ODS_REGIONS = (0, True)

  def __init__(self, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def output(self) -> _ods_ir.OpResult:
    return self.operation.results[0]

def create_token(*, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return CreateTokenOp(results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CrossReplicaSumOp(_ods_ir.OpView):
  r"""
  This operation is on its way out of StableHLO, so it is not included in
  the StableHLO specification: https://github.com/openxla/stablehlo/issues/3.
  
  Informally, this operation does the same thing as AllReduceOp with
  `channel_id = 0`, `use_global_device_ids = false` and `computation`
  implementing addition:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#all_reduce
  
  Example:
  ```mlir
  %result = "stablehlo.cross-replica-sum"(%operand) {
    replica_groups = dense<[[0, 1]]> : tensor<1x2xi64>
  } : (tensor<4xf32>) -> tensor<4xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.cross-replica-sum"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, replica_groups, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["replica_groups"] = (replica_groups if (
    isinstance(replica_groups, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
      _ods_ir.AttrBuilder.get('I64ElementsAttr')(replica_groups, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def replica_groups(self) -> _ods_ir.DenseIntElementsAttr:
    return self.operation.attributes["replica_groups"]

  @replica_groups.setter
  def replica_groups(self, value: _ods_ir.DenseIntElementsAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["replica_groups"] = value

def cross_replica_sum(operand, replica_groups, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return CrossReplicaSumOp(operand=operand, replica_groups=replica_groups, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class CustomCallOp(_ods_ir.OpView):
  r"""
  Encapsulates an implementation-defined operation `call_target_name` that
  takes `inputs` and `called_computations` and produces `results`.
  
  Depending on the API version, there are two ways to pass extra bits of static
  information to the external function:
  1. Use `API_VERSION_TYPED_FFI` which allows passing a dictionary attribute.
  2. Use a previous API version with a StringAttr to encode backend config.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#custom_call
  
  Example:
  ```mlir
  %results = stablehlo.custom_call @foo(%input0) {
    backend_config = {bar = 42 : i32},
    api_version = 4 : i32,
    called_computations = [@foo]
  } : (tensor<f64>) -> tensor<f64>
  ```
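  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above with `x : tensor<f64>`; the target name
  "foo" is only meaningful to whichever runtime eventually executes the call:
  ```python
  out_ty = ir.RankedTensorType.get([], ir.F64Type.get())
  # Result types are passed as a list; with a single result, the wrapper
  # returns that result directly.
  call = stablehlo.custom_call([out_ty], [x], "foo")
  ```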
  """

  OPERATION_NAME = "stablehlo.custom_call"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, inputs, call_target_name, *, has_side_effect=None, backend_config=None, api_version=None, called_computations=None, operand_layouts=None, result_layouts=None, output_operand_aliases=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["call_target_name"] = (call_target_name if (
    isinstance(call_target_name, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StrAttr')) else
      _ods_ir.AttrBuilder.get('StrAttr')(call_target_name, context=_ods_context))
    if has_side_effect is not None: attributes["has_side_effect"] = (has_side_effect if (
        isinstance(has_side_effect, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('BoolAttr')) else
          _ods_ir.AttrBuilder.get('BoolAttr')(has_side_effect, context=_ods_context))
    if backend_config is not None: attributes["backend_config"] = (backend_config if (
        isinstance(backend_config, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('anonymous_796')) else
          _ods_ir.AttrBuilder.get('anonymous_796')(backend_config, context=_ods_context))
    if api_version is not None: attributes["api_version"] = (api_version if (
        isinstance(api_version, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_CustomCallApiVersionAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_CustomCallApiVersionAttr')(api_version, context=_ods_context))
    if called_computations is not None: attributes["called_computations"] = (called_computations if (
        isinstance(called_computations, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_FlatSymbolRefArrayAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_FlatSymbolRefArrayAttr')(called_computations, context=_ods_context))
    if operand_layouts is not None: attributes["operand_layouts"] = (operand_layouts if (
        isinstance(operand_layouts, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ArrayOfLayoutAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ArrayOfLayoutAttr')(operand_layouts, context=_ods_context))
    if result_layouts is not None: attributes["result_layouts"] = (result_layouts if (
        isinstance(result_layouts, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ArrayOfLayoutAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ArrayOfLayoutAttr')(result_layouts, context=_ods_context))
    if output_operand_aliases is not None: attributes["output_operand_aliases"] = (output_operand_aliases if (
        isinstance(output_operand_aliases, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('anonymous_802')) else
          _ods_ir.AttrBuilder.get('anonymous_802')(output_operand_aliases, context=_ods_context))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def call_target_name(self) -> _ods_ir.StringAttr:
    return self.operation.attributes["call_target_name"]

  @call_target_name.setter
  def call_target_name(self, value: _ods_ir.StringAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["call_target_name"] = value

  @builtins.property
  def has_side_effect(self) -> _Optional[_ods_ir.BoolAttr]:
    if "has_side_effect" not in self.operation.attributes:
      return None
    return self.operation.attributes["has_side_effect"]

  @has_side_effect.setter
  def has_side_effect(self, value: _Optional[_ods_ir.BoolAttr]):
    if value is not None:
      self.operation.attributes["has_side_effect"] = value
    elif "has_side_effect" in self.operation.attributes:
      del self.operation.attributes["has_side_effect"]

  @has_side_effect.deleter
  def has_side_effect(self):
    del self.operation.attributes["has_side_effect"]

  @builtins.property
  def backend_config(self) -> _Optional[_ods_ir.Attribute]:
    if "backend_config" not in self.operation.attributes:
      return None
    return self.operation.attributes["backend_config"]

  @backend_config.setter
  def backend_config(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["backend_config"] = value
    elif "backend_config" in self.operation.attributes:
      del self.operation.attributes["backend_config"]

  @backend_config.deleter
  def backend_config(self):
    del self.operation.attributes["backend_config"]

  @builtins.property
  def api_version(self) -> _Optional[_ods_ir.Attribute]:
    if "api_version" not in self.operation.attributes:
      return None
    return self.operation.attributes["api_version"]

  @api_version.setter
  def api_version(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["api_version"] = value
    elif "api_version" in self.operation.attributes:
      del self.operation.attributes["api_version"]

  @api_version.deleter
  def api_version(self):
    del self.operation.attributes["api_version"]

  @builtins.property
  def called_computations(self) -> _Optional[_ods_ir.ArrayAttr]:
    if "called_computations" not in self.operation.attributes:
      return None
    return self.operation.attributes["called_computations"]

  @called_computations.setter
  def called_computations(self, value: _Optional[_ods_ir.ArrayAttr]):
    if value is not None:
      self.operation.attributes["called_computations"] = value
    elif "called_computations" in self.operation.attributes:
      del self.operation.attributes["called_computations"]

  @called_computations.deleter
  def called_computations(self):
    del self.operation.attributes["called_computations"]

  @builtins.property
  def operand_layouts(self) -> _Optional[_ods_ir.ArrayAttr]:
    if "operand_layouts" not in self.operation.attributes:
      return None
    return self.operation.attributes["operand_layouts"]

  @operand_layouts.setter
  def operand_layouts(self, value: _Optional[_ods_ir.ArrayAttr]):
    if value is not None:
      self.operation.attributes["operand_layouts"] = value
    elif "operand_layouts" in self.operation.attributes:
      del self.operation.attributes["operand_layouts"]

  @operand_layouts.deleter
  def operand_layouts(self):
    del self.operation.attributes["operand_layouts"]

  @builtins.property
  def result_layouts(self) -> _Optional[_ods_ir.ArrayAttr]:
    if "result_layouts" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_layouts"]

  @result_layouts.setter
  def result_layouts(self, value: _Optional[_ods_ir.ArrayAttr]):
    if value is not None:
      self.operation.attributes["result_layouts"] = value
    elif "result_layouts" in self.operation.attributes:
      del self.operation.attributes["result_layouts"]

  @result_layouts.deleter
  def result_layouts(self):
    del self.operation.attributes["result_layouts"]

  @builtins.property
  def output_operand_aliases(self) -> _Optional[_ods_ir.ArrayAttr]:
    if "output_operand_aliases" not in self.operation.attributes:
      return None
    return self.operation.attributes["output_operand_aliases"]

  @output_operand_aliases.setter
  def output_operand_aliases(self, value: _Optional[_ods_ir.ArrayAttr]):
    if value is not None:
      self.operation.attributes["output_operand_aliases"] = value
    elif "output_operand_aliases" in self.operation.attributes:
      del self.operation.attributes["output_operand_aliases"]

  @output_operand_aliases.deleter
  def output_operand_aliases(self):
    del self.operation.attributes["output_operand_aliases"]

def custom_call(result, inputs, call_target_name, *, has_side_effect=None, backend_config=None, api_version=None, called_computations=None, operand_layouts=None, result_layouts=None, output_operand_aliases=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, CustomCallOp]:
  op = CustomCallOp(result=result, inputs=inputs, call_target_name=call_target_name, has_side_effect=has_side_effect, backend_config=backend_config, api_version=api_version, called_computations=called_computations, operand_layouts=operand_layouts, result_layouts=result_layouts, output_operand_aliases=output_operand_aliases, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)

@_ods_cext.register_operation(_Dialect)
class DivOp(_ods_ir.OpView):
  r"""
  Performs element-wise division of dividend `lhs` and divisor `rhs` tensors
  and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#divide
  
  Example:
  ```mlir
  %result = stablehlo.divide %lhs, %rhs : tensor<4xf32>
  ```
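  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above with `lhs` and `rhs` of type
  `tensor<4xf32>`:
  ```python
  q = stablehlo.divide(lhs, rhs)  # result type is inferred
  ```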
  """

  OPERATION_NAME = "stablehlo.divide"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def divide(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return DivOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class DotGeneralOp(_ods_ir.OpView):
  r"""
  Computes dot products between slices of `lhs` and slices of `rhs` and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#dot_general
  
  Example:
  ```mlir
  %result = stablehlo.dot_general %lhs, %rhs,
    batching_dims = [0] x [0],
    contracting_dims = [2] x [1],
    precision = [DEFAULT, DEFAULT],
    algorithm = <lhs_precision_type = tf32, rhs_precision_type = tf32, accumulation_type = f32, lhs_component_count = 1, rhs_component_count = 1, num_primitive_operations = 1, allow_imprecise_accumulation = false>
    : (tensor<2x2x2xi64>, tensor<2x2x2xi64>) -> tensor<2x2x2xi64>
  ```
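  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above with `lhs` and `rhs` of type
  `tensor<2x2x2xi64>`; the keyword names of `DotDimensionNumbers.get` are an
  assumption about the StableHLO attribute bindings:
  ```python
  ddnums = stablehlo.DotDimensionNumbers.get(
      lhs_batching_dimensions=[0], rhs_batching_dimensions=[0],
      lhs_contracting_dimensions=[2], rhs_contracting_dimensions=[1])
  i64 = ir.IntegerType.get_signless(64)
  out_ty = ir.RankedTensorType.get([2, 2, 2], i64)
  prod = stablehlo.dot_general(out_ty, lhs, rhs, ddnums)
  ```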
  """

  OPERATION_NAME = "stablehlo.dot_general"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, lhs, rhs, dot_dimension_numbers, *, precision_config=None, algorithm=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dot_dimension_numbers"] = (dot_dimension_numbers if (
    isinstance(dot_dimension_numbers, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_DotDimensionNumbers')) else
      _ods_ir.AttrBuilder.get('StableHLO_DotDimensionNumbers')(dot_dimension_numbers, context=_ods_context))
    if precision_config is not None: attributes["precision_config"] = (precision_config if (
        isinstance(precision_config, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_PrecisionConfigAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_PrecisionConfigAttr')(precision_config, context=_ods_context))
    if algorithm is not None: attributes["algorithm"] = (algorithm if (
        isinstance(algorithm, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_DotAlgorithm')) else
          _ods_ir.AttrBuilder.get('StableHLO_DotAlgorithm')(algorithm, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def dot_dimension_numbers(self) -> _ods_ir.Attribute:
    return self.operation.attributes["dot_dimension_numbers"]

  @dot_dimension_numbers.setter
  def dot_dimension_numbers(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dot_dimension_numbers"] = value

  @builtins.property
  def precision_config(self) -> _Optional[_ods_ir.ArrayAttr]:
    if "precision_config" not in self.operation.attributes:
      return None
    return self.operation.attributes["precision_config"]

  @precision_config.setter
  def precision_config(self, value: _Optional[_ods_ir.ArrayAttr]):
    if value is not None:
      self.operation.attributes["precision_config"] = value
    elif "precision_config" in self.operation.attributes:
      del self.operation.attributes["precision_config"]

  @precision_config.deleter
  def precision_config(self):
    del self.operation.attributes["precision_config"]

  @builtins.property
  def algorithm(self) -> _Optional[_ods_ir.Attribute]:
    if "algorithm" not in self.operation.attributes:
      return None
    return self.operation.attributes["algorithm"]

  @algorithm.setter
  def algorithm(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["algorithm"] = value
    elif "algorithm" in self.operation.attributes:
      del self.operation.attributes["algorithm"]

  @algorithm.deleter
  def algorithm(self):
    del self.operation.attributes["algorithm"]

def dot_general(result, lhs, rhs, dot_dimension_numbers, *, precision_config=None, algorithm=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return DotGeneralOp(result=result, lhs=lhs, rhs=rhs, dot_dimension_numbers=dot_dimension_numbers, precision_config=precision_config, algorithm=algorithm, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class DotOp(_ods_ir.OpView):
  r"""
  This operation is on its way out of StableHLO, so it is not included in
  the StableHLO specification: https://github.com/openxla/stablehlo/issues/3.
  
  Informally, this operation does the same thing as XLA's Dot:
  https://www.tensorflow.org/xla/operation_semantics#dot
  
  Example:
  ```mlir
  %0 = stablehlo.dot %arg0, %arg1 : (tensor<1x2xi32>, tensor<2x1xi32>) -> tensor<1x1xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.dot"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, lhs, rhs, *, precision_config=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    if precision_config is not None: attributes["precision_config"] = (precision_config if (
        isinstance(precision_config, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_PrecisionConfigAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_PrecisionConfigAttr')(precision_config, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def precision_config(self) -> _Optional[_ods_ir.ArrayAttr]:
    if "precision_config" not in self.operation.attributes:
      return None
    return self.operation.attributes["precision_config"]

  @precision_config.setter
  def precision_config(self, value: _Optional[_ods_ir.ArrayAttr]):
    if value is not None:
      self.operation.attributes["precision_config"] = value
    elif "precision_config" in self.operation.attributes:
      del self.operation.attributes["precision_config"]

  @precision_config.deleter
  def precision_config(self):
    del self.operation.attributes["precision_config"]

def dot(result, lhs, rhs, *, precision_config=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return DotOp(result=result, lhs=lhs, rhs=rhs, precision_config=precision_config, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class DynamicBroadcastInDimOp(_ods_ir.OpView):
  r"""
  This operation is functionally identical to
  [broadcast_in_dim](https://github.com/openxla/stablehlo/blob/main/docs/spec.md#broadcast_in_dim)
  op, but the result shape is specified dynamically via `output_dimensions`.
  
  It also accepts optional attributes to express static knowledge about the
  expanding behavior of dimensions. If not specified, all dimensions are
  assumed to be possibly expanding. The set of dimensions that are known to
  be expanding and the set of dimensions that are known to be non-expanding
  must be disjoint, and both must be subsets of the operand's dimensions.
  
  See: https://github.com/openxla/stablehlo/blob/main/docs/spec.md#dynamic_broadcast_in_dim
  
  Example:
  ```mlir
  %operand = stablehlo.constant dense<[[1, 2, 3]]> : tensor<1x3xi64>
  %output_dimensions = stablehlo.constant dense<[2, 3, 2]> : tensor<3xi64>
  %result = "stablehlo.dynamic_broadcast_in_dim"(%operand, %output_dimensions) {
    broadcast_dimensions = array<i64: 2, 1>,
    known_expanding_dimensions = array<i64: 0>,
    known_nonexpanding_dimensions = array<i64: 1>
  } : (tensor<1x3xi64>, tensor<3xi64>) -> tensor<2x3x2xi64>
  ```
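  
  A minimal Python builder sketch, assuming the setup from the
  `stablehlo.compare` sketch above, with `operand : tensor<1x3xi64>` and
  `output_dimensions : tensor<3xi64>` already built as `ir.Value`s:
  ```python
  i64 = ir.IntegerType.get_signless(64)
  out_ty = ir.RankedTensorType.get([2, 3, 2], i64)
  bcast = stablehlo.dynamic_broadcast_in_dim(
      out_ty, operand, output_dimensions, [2, 1],
      known_expanding_dimensions=[0],
      known_nonexpanding_dimensions=[1])
  ```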
  
  """

  OPERATION_NAME = "stablehlo.dynamic_broadcast_in_dim"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, output_dimensions, broadcast_dimensions, *, known_expanding_dimensions=None, known_nonexpanding_dimensions=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(output_dimensions)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["broadcast_dimensions"] = (broadcast_dimensions if (
    isinstance(broadcast_dimensions, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(broadcast_dimensions, context=_ods_context))
    if known_expanding_dimensions is not None: attributes["known_expanding_dimensions"] = (known_expanding_dimensions if (
        isinstance(known_expanding_dimensions, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(known_expanding_dimensions, context=_ods_context))
    if known_nonexpanding_dimensions is not None: attributes["known_nonexpanding_dimensions"] = (known_nonexpanding_dimensions if (
        isinstance(known_nonexpanding_dimensions, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(known_nonexpanding_dimensions, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def output_dimensions(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def broadcast_dimensions(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["broadcast_dimensions"]

  @broadcast_dimensions.setter
  def broadcast_dimensions(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["broadcast_dimensions"] = value

  @builtins.property
  def known_expanding_dimensions(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "known_expanding_dimensions" not in self.operation.attributes:
      return None
    return self.operation.attributes["known_expanding_dimensions"]

  @known_expanding_dimensions.setter
  def known_expanding_dimensions(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["known_expanding_dimensions"] = value
    elif "known_expanding_dimensions" in self.operation.attributes:
      del self.operation.attributes["known_expanding_dimensions"]

  @known_expanding_dimensions.deleter
  def known_expanding_dimensions(self):
    del self.operation.attributes["known_expanding_dimensions"]

  @builtins.property
  def known_nonexpanding_dimensions(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "known_nonexpanding_dimensions" not in self.operation.attributes:
      return None
    return self.operation.attributes["known_nonexpanding_dimensions"]

  @known_nonexpanding_dimensions.setter
  def known_nonexpanding_dimensions(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["known_nonexpanding_dimensions"] = value
    elif "known_nonexpanding_dimensions" in self.operation.attributes:
      del self.operation.attributes["known_nonexpanding_dimensions"]

  @known_nonexpanding_dimensions.deleter
  def known_nonexpanding_dimensions(self):
    del self.operation.attributes["known_nonexpanding_dimensions"]

def dynamic_broadcast_in_dim(result, operand, output_dimensions, broadcast_dimensions, *, known_expanding_dimensions=None, known_nonexpanding_dimensions=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return DynamicBroadcastInDimOp(result=result, operand=operand, output_dimensions=output_dimensions, broadcast_dimensions=broadcast_dimensions, known_expanding_dimensions=known_expanding_dimensions, known_nonexpanding_dimensions=known_nonexpanding_dimensions, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class DynamicConvOp(_ods_ir.OpView):
  r"""
  This operation is functionally identical to
  [convolution](https://github.com/openxla/stablehlo/blob/main/docs/spec.md#convolution)
  op, but the padding is specified dynamically via `padding`.
  
  Example:
  ```mlir
  %padding = stablehlo.constant dense<2> : tensor<2x2xi64>
  %result = "stablehlo.dynamic_conv"(%lhs, %rhs, %padding) {
    window_strides = array<i64: 4, 4>,
    lhs_dilation = array<i64: 2, 2>,
    rhs_dilation = array<i64: 1, 1>,
    window_reversal = array<i1: false, false>,
    dimension_numbers = #stablehlo.conv<[b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f]>,
    batch_group_count = 1 : i64,
    feature_group_count = 1 : i64,
    precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]
  } : (tensor<1x4x4x1xi64>, tensor<3x3x1x1xi64>, tensor<2x2xi64>) -> tensor<1x2x2x1xi64>
  ```
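  
  A minimal Python builder sketch, reusing `dnums` and the operand types from
  the `stablehlo.convolution` sketch above and assuming
  `padding_val : tensor<2x2xi64>` is an `ir.Value`:
  ```python
  i64 = ir.IntegerType.get_signless(64)
  out_ty = ir.RankedTensorType.get([1, 2, 2, 1], i64)
  dconv = stablehlo.dynamic_conv(
      out_ty, lhs, rhs, padding_val, dnums,
      feature_group_count=1, batch_group_count=1,
      window_strides=[4, 4], lhs_dilation=[2, 2], rhs_dilation=[1, 1])
  ```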
  """

  OPERATION_NAME = "stablehlo.dynamic_conv"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, lhs, rhs, padding, dimension_numbers, feature_group_count, batch_group_count, *, window_strides=None, lhs_dilation=None, rhs_dilation=None, window_reversal=None, precision_config=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    operands.append(padding)
    _ods_context = _ods_get_default_loc_context(loc)
    if window_strides is not None: attributes["window_strides"] = (window_strides if (
        isinstance(window_strides, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(window_strides, context=_ods_context))
    if lhs_dilation is not None: attributes["lhs_dilation"] = (lhs_dilation if (
        isinstance(lhs_dilation, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(lhs_dilation, context=_ods_context))
    if rhs_dilation is not None: attributes["rhs_dilation"] = (rhs_dilation if (
        isinstance(rhs_dilation, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(rhs_dilation, context=_ods_context))
    if window_reversal is not None: attributes["window_reversal"] = (window_reversal if (
        isinstance(window_reversal, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseBoolArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseBoolArrayAttr')(window_reversal, context=_ods_context))
    attributes["dimension_numbers"] = (dimension_numbers if (
    isinstance(dimension_numbers, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_ConvDimensionNumbers')) else
      _ods_ir.AttrBuilder.get('StableHLO_ConvDimensionNumbers')(dimension_numbers, context=_ods_context))
    attributes["feature_group_count"] = (feature_group_count if (
    isinstance(feature_group_count, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(feature_group_count, context=_ods_context))
    attributes["batch_group_count"] = (batch_group_count if (
    isinstance(batch_group_count, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(batch_group_count, context=_ods_context))
    if precision_config is not None: attributes["precision_config"] = (precision_config if (
        isinstance(precision_config, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_PrecisionConfigAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_PrecisionConfigAttr')(precision_config, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def padding(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def window_strides(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "window_strides" not in self.operation.attributes:
      return None
    return self.operation.attributes["window_strides"]

  @window_strides.setter
  def window_strides(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["window_strides"] = value
    elif "window_strides" in self.operation.attributes:
      del self.operation.attributes["window_strides"]

  @window_strides.deleter
  def window_strides(self):
    del self.operation.attributes["window_strides"]

  @builtins.property
  def lhs_dilation(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "lhs_dilation" not in self.operation.attributes:
      return None
    return self.operation.attributes["lhs_dilation"]

  @lhs_dilation.setter
  def lhs_dilation(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["lhs_dilation"] = value
    elif "lhs_dilation" in self.operation.attributes:
      del self.operation.attributes["lhs_dilation"]

  @lhs_dilation.deleter
  def lhs_dilation(self):
    del self.operation.attributes["lhs_dilation"]

  @builtins.property
  def rhs_dilation(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "rhs_dilation" not in self.operation.attributes:
      return None
    return self.operation.attributes["rhs_dilation"]

  @rhs_dilation.setter
  def rhs_dilation(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["rhs_dilation"] = value
    elif "rhs_dilation" in self.operation.attributes:
      del self.operation.attributes["rhs_dilation"]

  @rhs_dilation.deleter
  def rhs_dilation(self):
    del self.operation.attributes["rhs_dilation"]

  @builtins.property
  def window_reversal(self) -> _Optional[_ods_ir.DenseBoolArrayAttr]:
    if "window_reversal" not in self.operation.attributes:
      return None
    return self.operation.attributes["window_reversal"]

  @window_reversal.setter
  def window_reversal(self, value: _Optional[_ods_ir.DenseBoolArrayAttr]):
    if value is not None:
      self.operation.attributes["window_reversal"] = value
    elif "window_reversal" in self.operation.attributes:
      del self.operation.attributes["window_reversal"]

  @window_reversal.deleter
  def window_reversal(self):
    del self.operation.attributes["window_reversal"]

  @builtins.property
  def dimension_numbers(self) -> _ods_ir.Attribute:
    return self.operation.attributes["dimension_numbers"]

  @dimension_numbers.setter
  def dimension_numbers(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimension_numbers"] = value

  @builtins.property
  def feature_group_count(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["feature_group_count"]

  @feature_group_count.setter
  def feature_group_count(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["feature_group_count"] = value

  @builtins.property
  def batch_group_count(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["batch_group_count"]

  @batch_group_count.setter
  def batch_group_count(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["batch_group_count"] = value

  @builtins.property
  def precision_config(self) -> _Optional[_ods_ir.ArrayAttr]:
    if "precision_config" not in self.operation.attributes:
      return None
    return self.operation.attributes["precision_config"]

  @precision_config.setter
  def precision_config(self, value: _Optional[_ods_ir.ArrayAttr]):
    if value is not None:
      self.operation.attributes["precision_config"] = value
    elif "precision_config" in self.operation.attributes:
      del self.operation.attributes["precision_config"]

  @precision_config.deleter
  def precision_config(self):
    del self.operation.attributes["precision_config"]

def dynamic_conv(result, lhs, rhs, padding, dimension_numbers, feature_group_count, batch_group_count, *, window_strides=None, lhs_dilation=None, rhs_dilation=None, window_reversal=None, precision_config=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return DynamicConvOp(result=result, lhs=lhs, rhs=rhs, padding=padding, dimension_numbers=dimension_numbers, feature_group_count=feature_group_count, batch_group_count=batch_group_count, window_strides=window_strides, lhs_dilation=lhs_dilation, rhs_dilation=rhs_dilation, window_reversal=window_reversal, precision_config=precision_config, loc=loc, ip=ip).result
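
# Illustrative usage sketch for the builder above; not part of the generated
# API. Assumptions: an ir.Context with the stablehlo dialect loaded, an
# ir.Location and an ir.InsertionPoint are active, and `lhs`, `rhs`, `padding`
# are ir.Values with the types from the docstring example. `_ods_ir` is this
# module's alias for the MLIR `ir` module.
def _example_dynamic_conv(lhs, rhs, padding):
  i64 = _ods_ir.IntegerType.get_signless(64)
  result_type = _ods_ir.RankedTensorType.get([1, 2, 2, 1], i64)
  # The dimension-numbers attribute is parsed from the textual form shown in
  # the docstring example above.
  dnums = _ods_ir.Attribute.parse(
      "#stablehlo.conv<[b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f]>")
  return dynamic_conv(
      result_type, lhs, rhs, padding,
      dimension_numbers=dnums,
      feature_group_count=1,   # plain ints become I64Attr via the builder
      batch_group_count=1,
      window_strides=[4, 4],   # plain lists become DenseI64ArrayAttr
      lhs_dilation=[2, 2],
      rhs_dilation=[1, 1])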

@_ods_cext.register_operation(_Dialect)
class DynamicGatherOp(_ods_ir.OpView):
  r"""
  This operation is functionally identical to
  [gather](https://github.com/openxla/stablehlo/blob/main/docs/spec.md#gather)
  op, with the `slice_sizes` specified dynamically as an operand.
  
  Example:
  ```mlir
  %slice_sizes = stablehlo.constant dense<[1, 2, 2]> : tensor<3xi64>
  %result = "stablehlo.dynamic_gather"(%operand, %start_indices, %slice_sizes) {
    dimension_numbers = #stablehlo.gather<
      offset_dims = [2, 3],
      collapsed_slice_dims = [0],
      start_index_map = [0, 2],
      index_vector_dim = 2>,
    indices_are_sorted = false
  } : (tensor<3x4x2xi64>, tensor<2x3x2xi64>, tensor<3xi64>) -> tensor<2x3x2x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.dynamic_gather"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, start_indices, slice_sizes, dimension_numbers, *, indices_are_sorted=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(start_indices)
    operands.append(slice_sizes)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dimension_numbers"] = (dimension_numbers if (
    isinstance(dimension_numbers, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_GatherDimensionNumbers')) else
      _ods_ir.AttrBuilder.get('StableHLO_GatherDimensionNumbers')(dimension_numbers, context=_ods_context))
    if indices_are_sorted is not None: attributes["indices_are_sorted"] = (indices_are_sorted if (
        isinstance(indices_are_sorted, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('BoolAttr')) else
          _ods_ir.AttrBuilder.get('BoolAttr')(indices_are_sorted, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def start_indices(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def slice_sizes(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def dimension_numbers(self) -> _ods_ir.Attribute:
    return self.operation.attributes["dimension_numbers"]

  @dimension_numbers.setter
  def dimension_numbers(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimension_numbers"] = value

  @builtins.property
  def indices_are_sorted(self) -> _Optional[_ods_ir.BoolAttr]:
    if "indices_are_sorted" not in self.operation.attributes:
      return None
    return self.operation.attributes["indices_are_sorted"]

  @indices_are_sorted.setter
  def indices_are_sorted(self, value: _Optional[_ods_ir.BoolAttr]):
    if value is not None:
      self.operation.attributes["indices_are_sorted"] = value
    elif "indices_are_sorted" in self.operation.attributes:
      del self.operation.attributes["indices_are_sorted"]

  @indices_are_sorted.deleter
  def indices_are_sorted(self):
    del self.operation.attributes["indices_are_sorted"]

def dynamic_gather(operand, start_indices, slice_sizes, dimension_numbers, *, indices_are_sorted=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return DynamicGatherOp(operand=operand, start_indices=start_indices, slice_sizes=slice_sizes, dimension_numbers=dimension_numbers, indices_are_sorted=indices_are_sorted, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class DynamicIotaOp(_ods_ir.OpView):
  r"""
  This operation is functionally identical to
  [iota](https://github.com/openxla/stablehlo/blob/main/docs/spec.md#iota)
  op, but the result shape is specified dynamically via `output_shape`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#dynamic_iota
  
  Example:
  ```mlir
  %output_shape = stablehlo.constant dense<[4, 5]> : tensor<2xi64>
  %0 = stablehlo.dynamic_iota %output_shape, dim = 0 : (tensor<2xi64>) -> tensor<4x5xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.dynamic_iota"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, output_shape, iota_dimension, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(output_shape)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["iota_dimension"] = (iota_dimension if (
    isinstance(iota_dimension, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(iota_dimension, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def output_shape(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def iota_dimension(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["iota_dimension"]

  @iota_dimension.setter
  def iota_dimension(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["iota_dimension"] = value

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def dynamic_iota(result, output_shape, iota_dimension, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return DynamicIotaOp(result=result, output_shape=output_shape, iota_dimension=iota_dimension, loc=loc, ip=ip).result
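
# Illustrative usage sketch; not part of the generated API. Assumes an active
# ir.Context/Location/InsertionPoint and that `output_shape` is an ir.Value of
# type tensor<2xi64> (e.g. the stablehlo.constant from the docstring example).
def _example_dynamic_iota(output_shape):
  i64 = _ods_ir.IntegerType.get_signless(64)
  result_type = _ods_ir.RankedTensorType.get([4, 5], i64)
  # `iota_dimension` is a plain int; the builder wraps it into an I64Attr.
  return dynamic_iota(result_type, output_shape, iota_dimension=0)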

@_ods_cext.register_operation(_Dialect)
class DynamicPadOp(_ods_ir.OpView):
  r"""
  This operation is functionally identical to
  [pad](https://github.com/openxla/stablehlo/blob/main/docs/spec.md#pad)
  op, but with `edge_padding_low`, `edge_padding_high` and `interior_padding`
  specified dynamically as values. See also:
  https://github.com/openxla/stablehlo/pull/2306#discussion_r1595669709
  
  See: https://github.com/openxla/stablehlo/blob/main/docs/spec.md#dynamic_pad
  
  Example:
  ```mlir
  %edge_padding_low = stablehlo.constant dense<[0, 1]> : tensor<2xi64>
  %edge_padding_high = stablehlo.constant dense<[2, 1]> : tensor<2xi64>
  %interior_padding = stablehlo.constant dense<[1, 2]> : tensor<2xi64>
  %result = stablehlo.dynamic_pad %operand, %padding_value,
              %edge_padding_low, %edge_padding_high, %interior_padding
              : (tensor<2x3xi64>, tensor<i64>, tensor<2xi64>, tensor<2xi64>, tensor<2xi64>) -> tensor<5x9xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.dynamic_pad"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, padding_value, edge_padding_low, edge_padding_high, interior_padding, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(padding_value)
    operands.append(edge_padding_low)
    operands.append(edge_padding_high)
    operands.append(interior_padding)
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def padding_value(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def edge_padding_low(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def edge_padding_high(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[3]

  @builtins.property
  def interior_padding(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[4]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def dynamic_pad(result, operand, padding_value, edge_padding_low, edge_padding_high, interior_padding, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return DynamicPadOp(result=result, operand=operand, padding_value=padding_value, edge_padding_low=edge_padding_low, edge_padding_high=edge_padding_high, interior_padding=interior_padding, loc=loc, ip=ip).result
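
# Illustrative usage sketch; not part of the generated API. Assumes an active
# ir.Context/Location/InsertionPoint; `operand`, `padding_value`, `low`,
# `high` and `interior` are ir.Values with the operand types shown in the
# docstring example.
def _example_dynamic_pad(operand, padding_value, low, high, interior):
  i64 = _ods_ir.IntegerType.get_signless(64)
  # The result type cannot be inferred here, so it is supplied explicitly.
  result_type = _ods_ir.RankedTensorType.get([5, 9], i64)
  return dynamic_pad(result_type, operand, padding_value, low, high, interior)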

@_ods_cext.register_operation(_Dialect)
class DynamicReshapeOp(_ods_ir.OpView):
  r"""
  This operation is functionally identical to
  [reshape](https://github.com/openxla/stablehlo/blob/main/docs/spec.md#reshape)
  op, but the result shape is specified dynamically via `output_shape`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#dynamic_reshape
  
  Example:
  ```mlir
  %output_shape = stablehlo.constant dense<[3, 2]> : tensor<2xi64>
  %result = stablehlo.dynamic_reshape %operand, %output_shape : (tensor<2x3xi64>, tensor<2xi64>) -> tensor<3x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.dynamic_reshape"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, output_shape, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(output_shape)
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def output_shape(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def dynamic_reshape(result, operand, output_shape, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return DynamicReshapeOp(result=result, operand=operand, output_shape=output_shape, loc=loc, ip=ip).result
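
# Illustrative usage sketch; not part of the generated API. Assumes an active
# ir.Context/Location/InsertionPoint; `operand` is a tensor<2x3xi64> value and
# `output_shape` a tensor<2xi64> value, as in the docstring example.
def _example_dynamic_reshape(operand, output_shape):
  i64 = _ods_ir.IntegerType.get_signless(64)
  result_type = _ods_ir.RankedTensorType.get([3, 2], i64)
  return dynamic_reshape(result_type, operand, output_shape)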

@_ods_cext.register_operation(_Dialect)
class DynamicSliceOp(_ods_ir.OpView):
  r"""
  Extracts a slice from the `operand` using dynamically-computed starting
  indices and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#dynamic_slice
  
  Example:
  ```mlir
  %result = stablehlo.dynamic_slice %operand, %start_indices0, %start_indices1, sizes = [2, 2]
    : (tensor<4x4xi32>, tensor<i64>, tensor<i64>) -> tensor<2x2xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.dynamic_slice"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, start_indices, slice_sizes, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.extend(_get_op_results_or_values(start_indices))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["slice_sizes"] = (slice_sizes if (
    isinstance(slice_sizes, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(slice_sizes, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def start_indices(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 2 + 1
    return self.operation.operands[1:1 + _ods_variadic_group_length]

  @builtins.property
  def slice_sizes(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["slice_sizes"]

  @slice_sizes.setter
  def slice_sizes(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["slice_sizes"] = value

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def dynamic_slice(operand, start_indices, slice_sizes, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return DynamicSliceOp(operand=operand, start_indices=start_indices, slice_sizes=slice_sizes, results=results, loc=loc, ip=ip).result
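
# Illustrative usage sketch; not part of the generated API. Assumes an active
# ir.Context/Location/InsertionPoint; `operand` is a tensor<4x4xi32> value and
# `i0`, `i1` are tensor<i64> start-index values, as in the docstring example.
def _example_dynamic_slice(operand, i0, i1):
  # `start_indices` is variadic, so a Python sequence of values is passed, and
  # `slice_sizes` is a plain list the builder turns into a DenseI64ArrayAttr.
  # The result type is left to inference (`results=None`).
  return dynamic_slice(operand, [i0, i1], slice_sizes=[2, 2])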

@_ods_cext.register_operation(_Dialect)
class DynamicUpdateSliceOp(_ods_ir.OpView):
  r"""
  Produces a `result` tensor which is equal to the `operand` tensor except
  that the slice starting at `start_indices` is updated with the values in
  `update`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#dynamic_update_slice
  
  Example:
  ```mlir
  %result = stablehlo.dynamic_update_slice %operand, %update, %start_indices0, %start_indices1
    : (tensor<4x4xi32>, tensor<2x2xi32>, tensor<i64>, tensor<i64>) -> tensor<4x4xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.dynamic_update_slice"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, update, start_indices, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(update)
    operands.extend(_get_op_results_or_values(start_indices))
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def update(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def start_indices(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 3 + 1
    return self.operation.operands[2:2 + _ods_variadic_group_length]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def dynamic_update_slice(operand, update, start_indices, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return DynamicUpdateSliceOp(operand=operand, update=update, start_indices=start_indices, results=results, loc=loc, ip=ip).result
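
# Illustrative usage sketch; not part of the generated API. Assumes an active
# ir.Context/Location/InsertionPoint; `operand` (tensor<4x4xi32>), `update`
# (tensor<2x2xi32>) and the start indices `i0`, `i1` (tensor<i64>) match the
# docstring example.
def _example_dynamic_update_slice(operand, update, i0, i1):
  # `start_indices` is variadic; the result type matches `operand` and is left
  # to inference (`results=None`).
  return dynamic_update_slice(operand, update, [i0, i1])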

@_ods_cext.register_operation(_Dialect)
class EinsumOp(_ods_ir.OpView):
  r"""
  This operation is on its way out of StableHLO, so it is not included in
  the StableHLO specification: https://github.com/openxla/stablehlo/issues/3.
  
  Informally, this operation does the same thing as TF's einsum:
  https://www.tensorflow.org/api_docs/python/tf/einsum
  
  Example:
  ```mlir
  %result = "stablehlo.einsum"(%lhs, %rhs) {
    einsum_config = "ab,bc->ac"
  } : (tensor<4x16xf32>, tensor<16x4xf32>) -> tensor<4x4xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.einsum"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, lhs, rhs, einsum_config, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["einsum_config"] = (einsum_config if (
    isinstance(einsum_config, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StrAttr')) else
      _ods_ir.AttrBuilder.get('StrAttr')(einsum_config, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def einsum_config(self) -> _ods_ir.StringAttr:
    return self.operation.attributes["einsum_config"]

  @einsum_config.setter
  def einsum_config(self, value: _ods_ir.StringAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["einsum_config"] = value

def einsum(result, lhs, rhs, einsum_config, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return EinsumOp(result=result, lhs=lhs, rhs=rhs, einsum_config=einsum_config, loc=loc, ip=ip).result
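
# Illustrative usage sketch; not part of the generated API. Assumes an active
# ir.Context/Location/InsertionPoint; `lhs` is a tensor<4x16xf32> value and
# `rhs` a tensor<16x4xf32> value, as in the docstring example.
def _example_einsum(lhs, rhs):
  f32 = _ods_ir.F32Type.get()
  result_type = _ods_ir.RankedTensorType.get([4, 4], f32)
  # `einsum_config` is a plain Python string wrapped into a StrAttr by the
  # builder.
  return einsum(result_type, lhs, rhs, einsum_config="ab,bc->ac")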

@_ods_cext.register_operation(_Dialect)
class ExpOp(_ods_ir.OpView):
  r"""
  Performs element-wise exponential operation on `operand` tensor and produces
  a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#exponential
  
  Example:
  ```mlir
  %result = stablehlo.exponential %operand : tensor<2x2xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.exponential"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def exponential(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ExpOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class Expm1Op(_ods_ir.OpView):
  r"""
  Performs element-wise exponential minus one operation on `operand` tensor
  and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#exponential_minus_one
  
  Example:
  ```mlir
  %result = stablehlo.exponential_minus_one %operand : tensor<2xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.exponential_minus_one"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def exponential_minus_one(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return Expm1Op(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class FftOp(_ods_ir.OpView):
  r"""
  Performs the forward and inverse Fourier transforms for real and complex
  inputs/outputs.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#fft
  
  Example:
  ```mlir
  %result = stablehlo.fft %operand, type = FFT, length = [4] : (tensor<4xcomplex<f32>>) -> tensor<4xcomplex<f32>>
  ```
  """

  OPERATION_NAME = "stablehlo.fft"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, fft_type, fft_length, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["fft_type"] = (fft_type if (
    isinstance(fft_type, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_FftTypeAttr')) else
      _ods_ir.AttrBuilder.get('StableHLO_FftTypeAttr')(fft_type, context=_ods_context))
    attributes["fft_length"] = (fft_length if (
    isinstance(fft_length, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(fft_length, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def fft_type(self) -> _ods_ir.Attribute:
    return self.operation.attributes["fft_type"]

  @fft_type.setter
  def fft_type(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["fft_type"] = value

  @builtins.property
  def fft_length(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["fft_length"]

  @fft_length.setter
  def fft_length(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["fft_length"] = value

def fft(operand, fft_type, fft_length, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return FftOp(operand=operand, fft_type=fft_type, fft_length=fft_length, results=results, loc=loc, ip=ip).result
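
# Illustrative usage sketch; not part of the generated API. Assumes an active
# ir.Context/Location/InsertionPoint with the stablehlo dialect loaded, and
# that `operand` is a tensor<4xcomplex<f32>> value as in the docstring example.
def _example_fft(operand):
  # The fft_type enum attribute is passed as an already-constructed Attribute,
  # parsed here from what is assumed to be its textual form; `fft_length` is a
  # plain list converted to a DenseI64ArrayAttr by the builder. The result
  # type is left to inference (`results=None`).
  fft_type = _ods_ir.Attribute.parse("#stablehlo<fft_type FFT>")
  return fft(operand, fft_type, fft_length=[4])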

@_ods_cext.register_operation(_Dialect)
class FloorOp(_ods_ir.OpView):
  r"""
  Performs element-wise floor of `operand` tensor and produces a `result`
  tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#floor
  
  Example:
  ```mlir
  %result = stablehlo.floor %operand : tensor<2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.floor"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def floor(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return FloorOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class GatherOp(_ods_ir.OpView):
  r"""
  Gathers slices from `operand` tensor from offsets specified in
  `start_indices` and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#gather
  
  Example:
  ```mlir
  %result = "stablehlo.gather"(%operand, %start_indices) {
    dimension_numbers = #stablehlo.gather<
      offset_dims = [3, 4],
      collapsed_slice_dims = [1],
      operand_batching_dims = [0],
      start_indices_batching_dims = [1],
      start_index_map = [2, 1],
      index_vector_dim = 3>,
    slice_sizes = array<i64: 1, 1, 2, 2>,
    indices_are_sorted = false
  } : (tensor<2x3x4x2xi64>, tensor<2x2x3x2xi64>) -> tensor<2x2x3x2x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.gather"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, start_indices, dimension_numbers, slice_sizes, *, indices_are_sorted=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(start_indices)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dimension_numbers"] = (dimension_numbers if (
    isinstance(dimension_numbers, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_GatherDimensionNumbers')) else
      _ods_ir.AttrBuilder.get('StableHLO_GatherDimensionNumbers')(dimension_numbers, context=_ods_context))
    attributes["slice_sizes"] = (slice_sizes if (
    isinstance(slice_sizes, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(slice_sizes, context=_ods_context))
    if indices_are_sorted is not None: attributes["indices_are_sorted"] = (indices_are_sorted if (
        isinstance(indices_are_sorted, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('BoolAttr')) else
          _ods_ir.AttrBuilder.get('BoolAttr')(indices_are_sorted, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def start_indices(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def dimension_numbers(self) -> _ods_ir.Attribute:
    return self.operation.attributes["dimension_numbers"]

  @dimension_numbers.setter
  def dimension_numbers(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimension_numbers"] = value

  @builtins.property
  def slice_sizes(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["slice_sizes"]

  @slice_sizes.setter
  def slice_sizes(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["slice_sizes"] = value

  @builtins.property
  def indices_are_sorted(self) -> _Optional[_ods_ir.BoolAttr]:
    if "indices_are_sorted" not in self.operation.attributes:
      return None
    return self.operation.attributes["indices_are_sorted"]

  @indices_are_sorted.setter
  def indices_are_sorted(self, value: _Optional[_ods_ir.BoolAttr]):
    if value is not None:
      self.operation.attributes["indices_are_sorted"] = value
    elif "indices_are_sorted" in self.operation.attributes:
      del self.operation.attributes["indices_are_sorted"]

  @indices_are_sorted.deleter
  def indices_are_sorted(self):
    del self.operation.attributes["indices_are_sorted"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def gather(operand, start_indices, dimension_numbers, slice_sizes, *, indices_are_sorted=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return GatherOp(operand=operand, start_indices=start_indices, dimension_numbers=dimension_numbers, slice_sizes=slice_sizes, indices_are_sorted=indices_are_sorted, results=results, loc=loc, ip=ip).result
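
# Illustrative usage sketch; not part of the generated API. Assumes an active
# ir.Context/Location/InsertionPoint with the stablehlo dialect loaded;
# `operand` and `start_indices` are ir.Values with the types from the
# docstring example. The dimension-numbers attribute is parsed from the
# textual form given there.
def _example_gather(operand, start_indices):
  dnums = _ods_ir.Attribute.parse(
      "#stablehlo.gather<"
      " offset_dims = [3, 4],"
      " collapsed_slice_dims = [1],"
      " operand_batching_dims = [0],"
      " start_indices_batching_dims = [1],"
      " start_index_map = [2, 1],"
      " index_vector_dim = 3>")
  # The result type is left to inference (`results=None`).
  return gather(operand, start_indices, dnums,
                slice_sizes=[1, 1, 2, 2], indices_are_sorted=False)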

@_ods_cext.register_operation(_Dialect)
class GetDimensionSizeOp(_ods_ir.OpView):
  r"""
  Produces the size of the given `dimension` of the `operand`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#get_dimension_size
  
  Example:
  ```mlir
  %result = stablehlo.get_dimension_size %operand, dim = 1 : (tensor<2x3xi64>) -> tensor<i32>
  ```
  """

  OPERATION_NAME = "stablehlo.get_dimension_size"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, dimension, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dimension"] = (dimension if (
    isinstance(dimension, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(dimension, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def dimension(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["dimension"]

  @dimension.setter
  def dimension(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimension"] = value

def get_dimension_size(operand, dimension, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return GetDimensionSizeOp(operand=operand, dimension=dimension, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class GetTupleElementOp(_ods_ir.OpView):
  r"""
  Extracts element at `index` position of the `operand` tuple and produces a
  `result`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#get_tuple_element
  
  Example:
  ```mlir
  %result = stablehlo.get_tuple_element %operand[0] : (tuple<tensor<2xf64>, tuple<tensor<i64>>>) -> tensor<2xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.get_tuple_element"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, index, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["index"] = (index if (
    isinstance(index, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I32Attr')) else
      _ods_ir.AttrBuilder.get('I32Attr')(index, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value:
    return self.operation.operands[0]

  @builtins.property
  def index(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["index"]

  @index.setter
  def index(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["index"] = value

def get_tuple_element(operand, index, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return GetTupleElementOp(operand=operand, index=index, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class IfOp(_ods_ir.OpView):
  r"""
  Produces the output from executing exactly one branch from `true_branch` or
  `false_branch` depending on the value of `pred`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#if
  
  Example:
  ```mlir
  %result = "stablehlo.if"(%pred) ({
    "stablehlo.return"(%result_true_branch) : (tensor<i32>) -> ()
  }, {
    "stablehlo.return"(%result_false_branch) : (tensor<i32>) -> ()
  }) : (tensor<i1>) -> tensor<i32>
  ```
  """

  OPERATION_NAME = "stablehlo.if"

  _ODS_REGIONS = (2, True)

  def __init__(self, result, pred, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(pred)
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def pred(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def true_branch(self) -> _ods_ir.Region:
    return self.regions[0]

  @builtins.property
  def false_branch(self) -> _ods_ir.Region:
    return self.regions[1]

def if_(result, pred, *, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, IfOp]:
  op = IfOp(result=result, pred=pred, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
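
# Illustrative usage sketch; not part of the generated API. Assumes an active
# ir.Context/Location/InsertionPoint; `pred` is a tensor<i1> value and
# `on_true`, `on_false` are tensor<i32> values defined outside the op, as in
# the docstring example. `ReturnOp` refers to the stablehlo.return builder
# generated elsewhere in this module.
def _example_if(pred, on_true, on_false):
  i32 = _ods_ir.IntegerType.get_signless(32)
  result_type = _ods_ir.RankedTensorType.get([], i32)
  op = IfOp([result_type], pred)
  # Both branch regions start empty: append a block to each and terminate it
  # with stablehlo.return.
  with _ods_ir.InsertionPoint(op.true_branch.blocks.append()):
    ReturnOp([on_true])
  with _ods_ir.InsertionPoint(op.false_branch.blocks.append()):
    ReturnOp([on_false])
  return op.results[0]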

@_ods_cext.register_operation(_Dialect)
class ImagOp(_ods_ir.OpView):
  r"""
  Extracts the imaginary part, element-wise, from the `operand` and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#imag
  
  Example:
  ```mlir
  %result = stablehlo.imag %operand : (tensor<2xcomplex<f32>>) -> tensor<2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.imag"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def imag(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ImagOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class InfeedOp(_ods_ir.OpView):
  r"""
  Reads data from the infeed and produces `results`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#infeed
  
  Example:
  ```mlir
  %results0:2 = "stablehlo.infeed"(%token) :
      (!stablehlo.token) -> (tensor<2x2xi64>, !stablehlo.token)
  ```
  """

  OPERATION_NAME = "stablehlo.infeed"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, token, *, infeed_config=None, layout=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(token)
    _ods_context = _ods_get_default_loc_context(loc)
    if infeed_config is not None: attributes["infeed_config"] = (infeed_config if (
        isinstance(infeed_config, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StrAttr')) else
          _ods_ir.AttrBuilder.get('StrAttr')(infeed_config, context=_ods_context))
    if layout is not None: attributes["layout"] = (layout if (
        isinstance(layout, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('ArrayAttr')) else
          _ods_ir.AttrBuilder.get('ArrayAttr')(layout, context=_ods_context))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def token(self) -> _ods_ir.Value:
    return self.operation.operands[0]

  @builtins.property
  def infeed_config(self) -> _ods_ir.StringAttr:
    return self.operation.attributes["infeed_config"]

  @infeed_config.setter
  def infeed_config(self, value: _ods_ir.StringAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["infeed_config"] = value

  @builtins.property
  def layout(self) -> _Optional[_ods_ir.ArrayAttr]:
    if "layout" not in self.operation.attributes:
      return None
    return self.operation.attributes["layout"]

  @layout.setter
  def layout(self, value: _Optional[_ods_ir.ArrayAttr]):
    if value is not None:
      self.operation.attributes["layout"] = value
    elif "layout" in self.operation.attributes:
      del self.operation.attributes["layout"]

  @layout.deleter
  def layout(self):
    del self.operation.attributes["layout"]

def infeed(result, token, *, infeed_config=None, layout=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, InfeedOp]:
  op = InfeedOp(result=result, token=token, infeed_config=infeed_config, layout=layout, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)

@_ods_cext.register_operation(_Dialect)
class IotaOp(_ods_ir.OpView):
  r"""
  Fills an `output` tensor with values in increasing order starting from zero
  along the `iota_dimension` dimension.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#iota
  
  Example:
  ```mlir
  %output = stablehlo.iota dim = 0 : tensor<4x5xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.iota"

  _ODS_REGIONS = (0, True)

  def __init__(self, output, iota_dimension, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["iota_dimension"] = (iota_dimension if (
    isinstance(iota_dimension, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(iota_dimension, context=_ods_context))
    results = []
    results.append(output)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def iota_dimension(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["iota_dimension"]

  @iota_dimension.setter
  def iota_dimension(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["iota_dimension"] = value

  @builtins.property
  def output(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def iota(output, iota_dimension, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return IotaOp(output=output, iota_dimension=iota_dimension, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class IsFiniteOp(_ods_ir.OpView):
  r"""
  Performs element-wise check whether the value in `x` is finite (i.e. is
  neither +Inf, -Inf, nor NaN) and produces a `y` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#is_finite
  
  Example:
  ```mlir
  %y = stablehlo.is_finite %x : (tensor<7xf64>) -> tensor<7xi1>
  ```
  """

  OPERATION_NAME = "stablehlo.is_finite"

  _ODS_REGIONS = (0, True)

  def __init__(self, x, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(x)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def x(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def y(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def is_finite(x, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return IsFiniteOp(x=x, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class Log1pOp(_ods_ir.OpView):
  r"""
  Performs element-wise logarithm plus one operation on `operand` tensor and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#log_plus_one
  
  Example:
  ```mlir
  %result = stablehlo.log_plus_one %operand : tensor<5xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.log_plus_one"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def log_plus_one(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return Log1pOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class LogOp(_ods_ir.OpView):
  r"""
  Performs element-wise logarithm operation on `operand` tensor and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#log
  
  Example:
  ```mlir
  %result = stablehlo.log %operand : tensor<2x2xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.log"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def log(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return LogOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class LogisticOp(_ods_ir.OpView):
  r"""
  Performs element-wise logistic operation on `operand` tensor and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#logistic
  
  Example:
  ```mlir
  %result = stablehlo.logistic %operand : tensor<2x2xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.logistic"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def logistic(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return LogisticOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class MapOp(_ods_ir.OpView):
  r"""
  Applies a map function `computation` to `inputs` along the `dimensions` and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#map
  
  Example:
  ```mlir
  %result = "stablehlo.map"(%input0, %input1) ({
    ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>):
      %0 = stablehlo.multiply %arg0, %arg1 : tensor<i64>
      stablehlo.return %0 : tensor<i64>
  }) {
    dimensions = array<i64: 0, 1>
  } : (tensor<2x2xi64>, tensor<2x2xi64>) -> tensor<2x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.map"

  _ODS_REGIONS = (1, True)

  def __init__(self, result, inputs, dimensions, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dimensions"] = (dimensions if (
    isinstance(dimensions, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(dimensions, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def dimensions(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["dimensions"]

  @dimensions.setter
  def dimensions(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimensions"] = value

  @builtins.property
  def computation(self) -> _ods_ir.Region:
    return self.regions[0]

def map(result, inputs, dimensions, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return MapOp(result=result, inputs=inputs, dimensions=dimensions, loc=loc, ip=ip).result
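
# Illustrative usage sketch (not emitted by mlir-tblgen): builds the
# stablehlo.map from the docstring example above. It assumes an active
# Context with the stablehlo dialect registered, an active Location, an
# enclosing InsertionPoint, and that `input0`/`input1` are SSA values of
# type tensor<2x2xi64>.
def _example_build_map(input0, input1):
  i64 = _ods_ir.IntegerType.get_signless(64)
  scalar_i64 = _ods_ir.RankedTensorType.get([], i64)
  result_type = _ods_ir.RankedTensorType.get([2, 2], i64)
  op = MapOp(result_type, [input0, input1],
             _ods_ir.DenseI64ArrayAttr.get([0, 1]))
  # The mapper region starts empty; append a block with one scalar block
  # argument per input and populate it.
  block = op.computation.blocks.append(scalar_i64, scalar_i64)
  with _ods_ir.InsertionPoint(block):
    product = multiply(block.arguments[0], block.arguments[1])
    return_([product])
  return op.result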

@_ods_cext.register_operation(_Dialect)
class MaxOp(_ods_ir.OpView):
  r"""
  Performs element-wise max operation on tensors `lhs` and `rhs` and produces
  a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#maximum
  
  Example:
  ```mlir
  %result = stablehlo.maximum %lhs, %rhs : tensor<4xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.maximum"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def maximum(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return MaxOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result
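
# Illustrative usage sketch (not emitted by mlir-tblgen): for elementwise ops
# such as stablehlo.maximum the result type can typically be inferred, so
# `results` may be omitted. Assumes an active Context, Location, and
# InsertionPoint, and that `lhs`/`rhs` are SSA values of matching tensor type.
def _example_build_maximum(lhs, rhs):
  return maximum(lhs, rhs)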

@_ods_cext.register_operation(_Dialect)
class MinOp(_ods_ir.OpView):
  r"""
  Performs element-wise min operation on tensors `lhs` and `rhs` and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#minimum
  
  Example:
  ```mlir
  %result = stablehlo.minimum %lhs, %rhs : tensor<4xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.minimum"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def minimum(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return MinOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class MulOp(_ods_ir.OpView):
  r"""
  Performs element-wise product of two tensors `lhs` and `rhs` and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#multiply
  
  Example:
  ```mlir
  %result = stablehlo.multiply %lhs, %rhs : tensor<2xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.multiply"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def multiply(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return MulOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class NegOp(_ods_ir.OpView):
  r"""
  Performs element-wise negation of `operand` tensor and produces a `result`
  tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#negate
  
  Example:
  ```mlir
  %result = stablehlo.negate %operand : tensor<2x3xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.negate"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def negate(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return NegOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class NotOp(_ods_ir.OpView):
  r"""
  Performs element-wise NOT of the boolean or integer tensor `operand` and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#not
  
  Example:
  ```mlir
  %result = stablehlo.not %operand : tensor<5x3x1xi1>
  ```
  """

  OPERATION_NAME = "stablehlo.not"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def not_(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return NotOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class OptimizationBarrierOp(_ods_ir.OpView):
  r"""
  Ensures that the operations that produce the `operand` are executed before any
  operations that depend on the `result` and prevents compiler transformations
  from moving operations across the barrier. Other than that, the operation is
  an identity, i.e. `result` = `operand`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#optimization_barrier
  
  Example:
  ```mlir
  %result0, %result1 = stablehlo.optimization_barrier %operand0, %operand1 : tensor<f32>, tensor<f32>
  ```
  """

  OPERATION_NAME = "stablehlo.optimization_barrier"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(operand))
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def result(self) -> _ods_ir.OpResultList:
    _ods_variadic_group_length = len(self.operation.results) - 1 + 1
    return self.operation.results[0:0 + _ods_variadic_group_length]

def optimization_barrier(operand, *, results=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, OptimizationBarrierOp]:
  op = OptimizationBarrierOp(operand=operand, results=results, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
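
# Illustrative usage sketch (not emitted by mlir-tblgen): pins two values
# behind an optimization barrier. The result types mirror the operand types
# and are passed explicitly here. Assumes an active Context, Location, and
# InsertionPoint.
def _example_build_optimization_barrier(operand0, operand1):
  return optimization_barrier([operand0, operand1],
                              results=[operand0.type, operand1.type])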

@_ods_cext.register_operation(_Dialect)
class OrOp(_ods_ir.OpView):
  r"""
  Performs element-wise OR of two tensors `lhs` and `rhs` and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#or
  
  Example:
  ```mlir
  %result = stablehlo.or %lhs, %rhs : tensor<2xi1>
  ```
  """

  OPERATION_NAME = "stablehlo.or"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def or_(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return OrOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class OutfeedOp(_ods_ir.OpView):
  r"""
  Writes `inputs` to the outfeed and produces a `result` token.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#outfeed
  
  Example:
  ```mlir
  %result = "stablehlo.outfeed"(%input0, %token) :
      (tensor<2x2x2xi64>, !stablehlo.token) -> !stablehlo.token
  ```
  """

  OPERATION_NAME = "stablehlo.outfeed"

  _ODS_REGIONS = (0, True)

  def __init__(self, inputs, token, *, outfeed_config=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    operands.append(token)
    _ods_context = _ods_get_default_loc_context(loc)
    if outfeed_config is not None: attributes["outfeed_config"] = (outfeed_config if (
        isinstance(outfeed_config, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StrAttr')) else
          _ods_ir.AttrBuilder.get('StrAttr')(outfeed_config, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 2 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def token(self) -> _ods_ir.Value:
    _ods_variadic_group_length = len(self.operation.operands) - 2 + 1
    return self.operation.operands[1 + _ods_variadic_group_length - 1]

  @builtins.property
  def outfeed_config(self) -> _ods_ir.StringAttr:
    return self.operation.attributes["outfeed_config"]

  @outfeed_config.setter
  def outfeed_config(self, value: _ods_ir.StringAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["outfeed_config"] = value

def outfeed(inputs, token, *, outfeed_config=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return OutfeedOp(inputs=inputs, token=token, outfeed_config=outfeed_config, results=results, loc=loc, ip=ip).result
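
# Illustrative usage sketch (not emitted by mlir-tblgen): writes one tensor to
# the outfeed, following the docstring example above. The token result type is
# parsed from its textual form; the optional `outfeed_config` is omitted.
# Assumes an active Context with stablehlo registered, a Location, an
# InsertionPoint, and that `data` and `token` are suitable SSA values.
def _example_build_outfeed(data, token):
  token_type = _ods_ir.Type.parse("!stablehlo.token")
  return outfeed([data], token, results=[token_type])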

@_ods_cext.register_operation(_Dialect)
class PadOp(_ods_ir.OpView):
  r"""
  Expands `operand` by padding around the tensor as well as between the
  elements of the tensor with the given `padding_value`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#pad
  
  Example:
  ```mlir
  %0 = stablehlo.pad %arg0, %arg1, low = [0, 1], high = [2, 1], interior = [1, 2]
    : (tensor<2x3xi32>, tensor<i32>) -> tensor<5x9xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.pad"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, padding_value, edge_padding_low, edge_padding_high, interior_padding, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(padding_value)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["edge_padding_low"] = (edge_padding_low if (
    isinstance(edge_padding_low, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(edge_padding_low, context=_ods_context))
    attributes["edge_padding_high"] = (edge_padding_high if (
    isinstance(edge_padding_high, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(edge_padding_high, context=_ods_context))
    attributes["interior_padding"] = (interior_padding if (
    isinstance(interior_padding, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(interior_padding, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def padding_value(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def edge_padding_low(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["edge_padding_low"]

  @edge_padding_low.setter
  def edge_padding_low(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["edge_padding_low"] = value

  @builtins.property
  def edge_padding_high(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["edge_padding_high"]

  @edge_padding_high.setter
  def edge_padding_high(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["edge_padding_high"] = value

  @builtins.property
  def interior_padding(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["interior_padding"]

  @interior_padding.setter
  def interior_padding(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["interior_padding"] = value

def pad(operand, padding_value, edge_padding_low, edge_padding_high, interior_padding, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return PadOp(operand=operand, padding_value=padding_value, edge_padding_low=edge_padding_low, edge_padding_high=edge_padding_high, interior_padding=interior_padding, results=results, loc=loc, ip=ip).result
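
# Illustrative usage sketch (not emitted by mlir-tblgen): pads a
# tensor<2x3xi32> exactly as in the docstring example above, passing the
# dense i64 array attributes explicitly and the result type via `results`.
# Assumes an active Context, Location, and InsertionPoint.
def _example_build_pad(operand, padding_value):
  i32 = _ods_ir.IntegerType.get_signless(32)
  result_type = _ods_ir.RankedTensorType.get([5, 9], i32)
  return pad(operand, padding_value,
             _ods_ir.DenseI64ArrayAttr.get([0, 1]),   # edge_padding_low
             _ods_ir.DenseI64ArrayAttr.get([2, 1]),   # edge_padding_high
             _ods_ir.DenseI64ArrayAttr.get([1, 2]),   # interior_padding
             results=[result_type])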

@_ods_cext.register_operation(_Dialect)
class PartitionIdOp(_ods_ir.OpView):
  r"""
  Produces `partition_id` of the current process.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#partition_id
  
  Example:
  ```mlir
  %result = stablehlo.partition_id : tensor<ui32>
  ```
  """

  OPERATION_NAME = "stablehlo.partition_id"

  _ODS_REGIONS = (0, True)

  def __init__(self, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

def partition_id(*, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return PartitionIdOp(results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class PopulationCountOp(_ods_ir.OpView):
  r"""
  Performs element-wise count of the number of bits set in the `operand`
  tensor and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#popcnt
  
  Example:
  ```mlir
  %result = stablehlo.popcnt %operand : tensor<4xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.popcnt"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def popcnt(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return PopulationCountOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class PowOp(_ods_ir.OpView):
  r"""
  Performs element-wise exponentiation of `lhs` tensor by `rhs` tensor and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#power
  
  Example:
  ```mlir
  %result = stablehlo.power %lhs, %rhs : tensor<6xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.power"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def power(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return PowOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class RealDynamicSliceOp(_ods_ir.OpView):
  r"""
  This operation is a work in progress, so it is not yet included in
  the StableHLO specification: https://github.com/openxla/stablehlo/issues/8.
  
  Informally, this operation does the same thing as SliceOp except
  that `start_indices`, `limit_indices` and `strides` are specified dynamically:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#slice
  
  Example:
  ```mlir
  %result = stablehlo.real_dynamic_slice %operand,
              %start_indices, %limit_indices, %strides
         : (tensor<256x?xf32>, tensor<2xindex>, tensor<2xindex>, tensor<2xindex>) -> tensor<256x?xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.real_dynamic_slice"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, start_indices, limit_indices, strides, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(start_indices)
    operands.append(limit_indices)
    operands.append(strides)
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def start_indices(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def limit_indices(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def strides(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[3]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def real_dynamic_slice(result, operand, start_indices, limit_indices, strides, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return RealDynamicSliceOp(result=result, operand=operand, start_indices=start_indices, limit_indices=limit_indices, strides=strides, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class RealOp(_ods_ir.OpView):
  r"""
  Extracts the real part, element-wise, from the `operand` and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#real
  
  Example:
  ```mlir
  %result = stablehlo.real %operand : (tensor<2xcomplex<f32>>) -> tensor<2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.real"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def real(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return RealOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class RecvOp(_ods_ir.OpView):
  r"""
  Receives data from a channel with `channel_id` and produces `results`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#recv
  
  Example:
  ```mlir
  %results:2 = "stablehlo.recv"(%token) {
    channel_handle = #stablehlo.channel_handle<handle = 0, type = 1>,
    is_host_transfer = false,
    source_target_pairs = dense<[[0, 1], [1, 2]]> : tensor<2x2xi64>
  } : (!stablehlo.token) -> (tensor<2x2xi64>, !stablehlo.token)
  ```
  """

  OPERATION_NAME = "stablehlo.recv"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, token, channel_handle, *, is_host_transfer=None, source_target_pairs=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(token)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["channel_handle"] = (channel_handle if (
    isinstance(channel_handle, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_ChannelHandle')) else
      _ods_ir.AttrBuilder.get('StableHLO_ChannelHandle')(channel_handle, context=_ods_context))
    if is_host_transfer is not None: attributes["is_host_transfer"] = (is_host_transfer if (
        isinstance(is_host_transfer, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('BoolAttr')) else
          _ods_ir.AttrBuilder.get('BoolAttr')(is_host_transfer, context=_ods_context))
    if source_target_pairs is not None: attributes["source_target_pairs"] = (source_target_pairs if (
        isinstance(source_target_pairs, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
          _ods_ir.AttrBuilder.get('I64ElementsAttr')(source_target_pairs, context=_ods_context))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def token(self) -> _ods_ir.Value:
    return self.operation.operands[0]

  @builtins.property
  def channel_handle(self) -> _ods_ir.Attribute:
    return self.operation.attributes["channel_handle"]

  @channel_handle.setter
  def channel_handle(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["channel_handle"] = value

  @builtins.property
  def is_host_transfer(self) -> _Optional[_ods_ir.BoolAttr]:
    if "is_host_transfer" not in self.operation.attributes:
      return None
    return self.operation.attributes["is_host_transfer"]

  @is_host_transfer.setter
  def is_host_transfer(self, value: _Optional[_ods_ir.BoolAttr]):
    if value is not None:
      self.operation.attributes["is_host_transfer"] = value
    elif "is_host_transfer" in self.operation.attributes:
      del self.operation.attributes["is_host_transfer"]

  @is_host_transfer.deleter
  def is_host_transfer(self):
    del self.operation.attributes["is_host_transfer"]

  @builtins.property
  def source_target_pairs(self) -> _Optional[_ods_ir.DenseIntElementsAttr]:
    if "source_target_pairs" not in self.operation.attributes:
      return None
    return self.operation.attributes["source_target_pairs"]

  @source_target_pairs.setter
  def source_target_pairs(self, value: _Optional[_ods_ir.DenseIntElementsAttr]):
    if value is not None:
      self.operation.attributes["source_target_pairs"] = value
    elif "source_target_pairs" in self.operation.attributes:
      del self.operation.attributes["source_target_pairs"]

  @source_target_pairs.deleter
  def source_target_pairs(self):
    del self.operation.attributes["source_target_pairs"]

def recv(result, token, channel_handle, *, is_host_transfer=None, source_target_pairs=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, RecvOp]:
  op = RecvOp(result=result, token=token, channel_handle=channel_handle, is_host_transfer=is_host_transfer, source_target_pairs=source_target_pairs, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)

@_ods_cext.register_operation(_Dialect)
class ReduceOp(_ods_ir.OpView):
  r"""
  Applies a reduction function `body` to `inputs` and `init_values` along the
  `dimensions` and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#reduce
  
  Example:
  ```mlir
  %result = "stablehlo.reduce"(%input, %init_value) ({
    ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>):
      %0 = stablehlo.add %arg0, %arg1 : tensor<i64>
      stablehlo.return %0 : tensor<i64>
  }) {
    dimensions = array<i64: 1>
  } : (tensor<1x6xi64>, tensor<i64>) -> tensor<1xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.reduce"

  _ODS_REGIONS = (1, True)

  def __init__(self, result, inputs, init_values, dimensions, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    operands.extend(_get_op_results_or_values(init_values))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dimensions"] = (dimensions if (
    isinstance(dimensions, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(dimensions, context=_ods_context))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    start, elements_per_group = _ods_equally_sized_accessor(self.operation.operands, 0, 2, 0, 0)
    return self.operation.operands[start:start + elements_per_group]

  @builtins.property
  def init_values(self) -> _ods_ir.OpOperandList:
    start, elements_per_group = _ods_equally_sized_accessor(self.operation.operands, 0, 2, 0, 1)
    return self.operation.operands[start:start + elements_per_group]

  @builtins.property
  def dimensions(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["dimensions"]

  @dimensions.setter
  def dimensions(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimensions"] = value

  @builtins.property
  def body(self) -> _ods_ir.Region:
    return self.regions[0]

def reduce(result, inputs, init_values, dimensions, *, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, ReduceOp]:
  op = ReduceOp(result=result, inputs=inputs, init_values=init_values, dimensions=dimensions, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
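
# Illustrative usage sketch (not emitted by mlir-tblgen): sums a
# tensor<1x6xi64> over dimension 1, mirroring the docstring example above.
# The reducer region is appended and populated after the op is created.
# Assumes an active Context, Location, and InsertionPoint.
def _example_build_reduce(operand, init_value):
  i64 = _ods_ir.IntegerType.get_signless(64)
  scalar_i64 = _ods_ir.RankedTensorType.get([], i64)
  result_type = _ods_ir.RankedTensorType.get([1], i64)
  op = ReduceOp([result_type], [operand], [init_value],
                _ods_ir.DenseI64ArrayAttr.get([1]))
  # One scalar block argument per input, followed by one per init value.
  block = op.body.blocks.append(scalar_i64, scalar_i64)
  with _ods_ir.InsertionPoint(block):
    summed = add(block.arguments[0], block.arguments[1])
    return_([summed])
  return op.results[0]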

@_ods_cext.register_operation(_Dialect)
class ReducePrecisionOp(_ods_ir.OpView):
  r"""
  Performs element-wise conversion of `operand` to another floating-point type
  that uses `exponent_bits` and `mantissa_bits` and back to the original
  floating-point type and produces an `output` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#reduce_precision
  
  Example:
  ```mlir
  %output = stablehlo.reduce_precision %operand, format = e5m10 : tensor<6xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.reduce_precision"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, exponent_bits, mantissa_bits, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["exponent_bits"] = (exponent_bits if (
    isinstance(exponent_bits, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I32Attr')) else
      _ods_ir.AttrBuilder.get('I32Attr')(exponent_bits, context=_ods_context))
    attributes["mantissa_bits"] = (mantissa_bits if (
    isinstance(mantissa_bits, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I32Attr')) else
      _ods_ir.AttrBuilder.get('I32Attr')(mantissa_bits, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def exponent_bits(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["exponent_bits"]

  @exponent_bits.setter
  def exponent_bits(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["exponent_bits"] = value

  @builtins.property
  def mantissa_bits(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["mantissa_bits"]

  @mantissa_bits.setter
  def mantissa_bits(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["mantissa_bits"] = value

  @builtins.property
  def output(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def reduce_precision(operand, exponent_bits, mantissa_bits, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ReducePrecisionOp(operand=operand, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits, results=results, loc=loc, ip=ip).result
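
# Illustrative usage sketch (not emitted by mlir-tblgen): mirrors the e5m10
# docstring example above, building exponent_bits/mantissa_bits as explicit
# i32 IntegerAttrs (plain Python ints may also be accepted when a default
# I32Attr builder is registered). The result type equals the operand type.
# Assumes an active Context, Location, and InsertionPoint.
def _example_build_reduce_precision(operand):
  i32 = _ods_ir.IntegerType.get_signless(32)
  return reduce_precision(operand,
                          _ods_ir.IntegerAttr.get(i32, 5),
                          _ods_ir.IntegerAttr.get(i32, 10),
                          results=[operand.type])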

@_ods_cext.register_operation(_Dialect)
class ReduceScatterOp(_ods_ir.OpView):
  r"""
  Within each process group in the process grid, performs reduction, using
  `computation`, over the values of the `operand` tensor from each process,
  splits the reduction result along `scatter_dimension` into parts, and
  scatters the split parts between the processes to produce the `result`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#reduce_scatter
  
  Example:
  ```mlir
  %result = "stablehlo.reduce_scatter"(%operand) ({
    ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>):
    %0 = stablehlo.add %arg0, %arg1 : tensor<i64>
    stablehlo.return %0 : tensor<i64>
  }) {
    scatter_dimension = 1 : i64,
    replica_groups = dense<[[0, 1]]> : tensor<1x2xi64>,
    channel_handle = #stablehlo.channel_handle<handle = 0, type = 0>
  } : (tensor<2x4xi64>) -> tensor<2x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.reduce_scatter"

  _ODS_REGIONS = (1, True)

  def __init__(self, result, operand, scatter_dimension, replica_groups, *, channel_handle=None, use_global_device_ids=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["scatter_dimension"] = (scatter_dimension if (
    isinstance(scatter_dimension, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(scatter_dimension, context=_ods_context))
    attributes["replica_groups"] = (replica_groups if (
    isinstance(replica_groups, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
      _ods_ir.AttrBuilder.get('I64ElementsAttr')(replica_groups, context=_ods_context))
    if channel_handle is not None: attributes["channel_handle"] = (channel_handle if (
        isinstance(channel_handle, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ChannelHandle')) else
          _ods_ir.AttrBuilder.get('StableHLO_ChannelHandle')(channel_handle, context=_ods_context))
    if bool(use_global_device_ids): attributes["use_global_device_ids"] = _ods_ir.UnitAttr.get(
      _ods_get_default_loc_context(loc))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def scatter_dimension(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["scatter_dimension"]

  @scatter_dimension.setter
  def scatter_dimension(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["scatter_dimension"] = value

  @builtins.property
  def replica_groups(self) -> _ods_ir.DenseIntElementsAttr:
    return self.operation.attributes["replica_groups"]

  @replica_groups.setter
  def replica_groups(self, value: _ods_ir.DenseIntElementsAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["replica_groups"] = value

  @builtins.property
  def channel_handle(self) -> _Optional[_ods_ir.Attribute]:
    if "channel_handle" not in self.operation.attributes:
      return None
    return self.operation.attributes["channel_handle"]

  @channel_handle.setter
  def channel_handle(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["channel_handle"] = value
    elif "channel_handle" in self.operation.attributes:
      del self.operation.attributes["channel_handle"]

  @channel_handle.deleter
  def channel_handle(self):
    del self.operation.attributes["channel_handle"]

  @builtins.property
  def use_global_device_ids(self) -> bool:
    return "use_global_device_ids" in self.operation.attributes

  @use_global_device_ids.setter
  def use_global_device_ids(self, value):
    if bool(value):
      self.operation.attributes["use_global_device_ids"] = _ods_ir.UnitAttr.get()
    elif "use_global_device_ids" in self.operation.attributes:
      del self.operation.attributes["use_global_device_ids"]

  @use_global_device_ids.deleter
  def use_global_device_ids(self):
    del self.operation.attributes["use_global_device_ids"]

  @builtins.property
  def computation(self) -> _ods_ir.Region:
    return self.regions[0]

def reduce_scatter(result, operand, scatter_dimension, replica_groups, *, channel_handle=None, use_global_device_ids=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ReduceScatterOp(result=result, operand=operand, scatter_dimension=scatter_dimension, replica_groups=replica_groups, channel_handle=channel_handle, use_global_device_ids=use_global_device_ids, loc=loc, ip=ip).result
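
# Illustrative usage sketch (not emitted by mlir-tblgen): follows the
# docstring example above, with the optional channel_handle omitted. numpy is
# used to build the 1x2 replica_groups elements attribute, and the reduction
# region is populated after creation. Assumes an active Context, Location,
# and InsertionPoint.
def _example_build_reduce_scatter(operand):
  import numpy as np
  i64 = _ods_ir.IntegerType.get_signless(64)
  scalar_i64 = _ods_ir.RankedTensorType.get([], i64)
  result_type = _ods_ir.RankedTensorType.get([2, 2], i64)
  replica_groups = _ods_ir.DenseElementsAttr.get(
      np.asarray([[0, 1]], dtype=np.int64))
  op = ReduceScatterOp(result_type, operand,
                       _ods_ir.IntegerAttr.get(i64, 1), replica_groups)
  block = op.computation.blocks.append(scalar_i64, scalar_i64)
  with _ods_ir.InsertionPoint(block):
    summed = add(block.arguments[0], block.arguments[1])
    return_([summed])
  return op.result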

@_ods_cext.register_operation(_Dialect)
class ReduceWindowOp(_ods_ir.OpView):
  r"""
  Applies a reduction function `body` to windows of `inputs` and `init_values`
  and produces `results`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#reduce_window
  
  Example:
  ```mlir
  %result = "stablehlo.reduce_window"(%input, %init_value) ({
    ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>):
      %0 = stablehlo.add %arg0, %arg1 : tensor<i64>
      stablehlo.return %0 : tensor<i64>
  }) {
    window_dimensions = array<i64: 2, 1>,
    window_strides = array<i64: 4, 1>,
    base_dilations = array<i64: 2, 1>,
    window_dilations = array<i64: 3, 1>,
    padding = dense<[[2, 1], [0, 0]]> : tensor<2x2xi64>
  } : (tensor<3x2xi64>, tensor<i64>) -> tensor<2x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.reduce_window"

  _ODS_REGIONS = (1, True)

  def __init__(self, result, inputs, init_values, window_dimensions, *, window_strides=None, base_dilations=None, window_dilations=None, padding=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    operands.extend(_get_op_results_or_values(init_values))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["window_dimensions"] = (window_dimensions if (
    isinstance(window_dimensions, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(window_dimensions, context=_ods_context))
    if window_strides is not None: attributes["window_strides"] = (window_strides if (
        isinstance(window_strides, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(window_strides, context=_ods_context))
    if base_dilations is not None: attributes["base_dilations"] = (base_dilations if (
        isinstance(base_dilations, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(base_dilations, context=_ods_context))
    if window_dilations is not None: attributes["window_dilations"] = (window_dilations if (
        isinstance(window_dilations, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(window_dilations, context=_ods_context))
    if padding is not None: attributes["padding"] = (padding if (
        isinstance(padding, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
          _ods_ir.AttrBuilder.get('I64ElementsAttr')(padding, context=_ods_context))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    start, elements_per_group = _ods_equally_sized_accessor(self.operation.operands, 0, 2, 0, 0)
    return self.operation.operands[start:start + elements_per_group]

  @builtins.property
  def init_values(self) -> _ods_ir.OpOperandList:
    start, elements_per_group = _ods_equally_sized_accessor(self.operation.operands, 0, 2, 0, 1)
    return self.operation.operands[start:start + elements_per_group]

  @builtins.property
  def window_dimensions(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["window_dimensions"]

  @window_dimensions.setter
  def window_dimensions(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["window_dimensions"] = value

  @builtins.property
  def window_strides(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "window_strides" not in self.operation.attributes:
      return None
    return self.operation.attributes["window_strides"]

  @window_strides.setter
  def window_strides(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["window_strides"] = value
    elif "window_strides" in self.operation.attributes:
      del self.operation.attributes["window_strides"]

  @window_strides.deleter
  def window_strides(self):
    del self.operation.attributes["window_strides"]

  @builtins.property
  def base_dilations(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "base_dilations" not in self.operation.attributes:
      return None
    return self.operation.attributes["base_dilations"]

  @base_dilations.setter
  def base_dilations(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["base_dilations"] = value
    elif "base_dilations" in self.operation.attributes:
      del self.operation.attributes["base_dilations"]

  @base_dilations.deleter
  def base_dilations(self):
    del self.operation.attributes["base_dilations"]

  @builtins.property
  def window_dilations(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "window_dilations" not in self.operation.attributes:
      return None
    return self.operation.attributes["window_dilations"]

  @window_dilations.setter
  def window_dilations(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["window_dilations"] = value
    elif "window_dilations" in self.operation.attributes:
      del self.operation.attributes["window_dilations"]

  @window_dilations.deleter
  def window_dilations(self):
    del self.operation.attributes["window_dilations"]

  @builtins.property
  def padding(self) -> _Optional[_ods_ir.DenseIntElementsAttr]:
    if "padding" not in self.operation.attributes:
      return None
    return self.operation.attributes["padding"]

  @padding.setter
  def padding(self, value: _Optional[_ods_ir.DenseIntElementsAttr]):
    if value is not None:
      self.operation.attributes["padding"] = value
    elif "padding" in self.operation.attributes:
      del self.operation.attributes["padding"]

  @padding.deleter
  def padding(self):
    del self.operation.attributes["padding"]

  @builtins.property
  def body(self) -> _ods_ir.Region:
    return self.regions[0]

def reduce_window(result, inputs, init_values, window_dimensions, *, window_strides=None, base_dilations=None, window_dilations=None, padding=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, ReduceWindowOp]:
  op = ReduceWindowOp(result=result, inputs=inputs, init_values=init_values, window_dimensions=window_dimensions, window_strides=window_strides, base_dilations=base_dilations, window_dilations=window_dilations, padding=padding, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
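
# Illustrative usage sketch (not emitted by mlir-tblgen): mirrors the
# docstring example above; optional window attributes are passed as keyword
# arguments and `padding` is built with numpy. Assumes an active Context,
# Location, and InsertionPoint.
def _example_build_reduce_window(operand, init_value):
  import numpy as np
  i64 = _ods_ir.IntegerType.get_signless(64)
  scalar_i64 = _ods_ir.RankedTensorType.get([], i64)
  result_type = _ods_ir.RankedTensorType.get([2, 2], i64)
  op = ReduceWindowOp(
      [result_type], [operand], [init_value],
      _ods_ir.DenseI64ArrayAttr.get([2, 1]),
      window_strides=_ods_ir.DenseI64ArrayAttr.get([4, 1]),
      base_dilations=_ods_ir.DenseI64ArrayAttr.get([2, 1]),
      window_dilations=_ods_ir.DenseI64ArrayAttr.get([3, 1]),
      padding=_ods_ir.DenseElementsAttr.get(
          np.asarray([[2, 1], [0, 0]], dtype=np.int64)))
  block = op.body.blocks.append(scalar_i64, scalar_i64)
  with _ods_ir.InsertionPoint(block):
    summed = add(block.arguments[0], block.arguments[1])
    return_([summed])
  return op.results[0]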

@_ods_cext.register_operation(_Dialect)
class RemOp(_ods_ir.OpView):
  r"""
  Performs element-wise remainder of dividend `lhs` and divisor `rhs` tensors
  and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#remainder
  
  Example:
  ```mlir
  %result = stablehlo.remainder %lhs, %rhs : tensor<4xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.remainder"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def remainder(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return RemOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ReplicaIdOp(_ods_ir.OpView):
  r"""
  Produces `replica_id` of the current process.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#replica_id
  
  Example:
  ```mlir
  %result = stablehlo.replica_id : tensor<ui32>
  ```
  """

  OPERATION_NAME = "stablehlo.replica_id"

  _ODS_REGIONS = (0, True)

  def __init__(self, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

def replica_id(*, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ReplicaIdOp(results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ReshapeOp(_ods_ir.OpView):
  r"""
  Performs reshape of `operand` tensor to a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#reshape
  
  Example:
  ```mlir
  %result = stablehlo.reshape %operand : (tensor<2xf32>) -> tensor<1x2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.reshape"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

def reshape(result, operand, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return ReshapeOp(result=result, operand=operand, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ReturnOp(_ods_ir.OpView):
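  r"""
  Terminates the region of an enclosing StableHLO op (for example `reduce`,
  `map`, or `reduce_window`) and forwards its operands as the values produced
  by that region. (Descriptive note added for readability; this docstring is
  not part of the tblgen output.)
  """
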
  OPERATION_NAME = "stablehlo.return"

  _ODS_REGIONS = (0, True)

  def __init__(self, results_, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(results_))
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def results_(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

def return_(results_, *, loc=None, ip=None) -> ReturnOp:
  return ReturnOp(results_=results_, loc=loc, ip=ip)

@_ods_cext.register_operation(_Dialect)
class ReverseOp(_ods_ir.OpView):
  r"""
  Reverses the order of elements in the `operand` along the specified
  `dimensions` and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#reverse
  
  Example:
  ```mlir
  %result = stablehlo.reverse %operand, dims = [1] : tensor<3x2xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.reverse"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, dimensions, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dimensions"] = (dimensions if (
    isinstance(dimensions, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(dimensions, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def dimensions(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["dimensions"]

  @dimensions.setter
  def dimensions(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimensions"] = value

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def reverse(operand, dimensions, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ReverseOp(operand=operand, dimensions=dimensions, results=results, loc=loc, ip=ip).result
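
# Illustrative usage sketch (not emitted by mlir-tblgen): reverses dimension 1
# as in the docstring example above, passing `dimensions` as an explicit
# DenseI64ArrayAttr (a plain Python sequence may also be accepted when a
# default DenseI64ArrayAttr builder is registered). The result type equals the
# operand type. Assumes an active Context, Location, and InsertionPoint.
def _example_build_reverse(operand):
  return reverse(operand, _ods_ir.DenseI64ArrayAttr.get([1]),
                 results=[operand.type])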

@_ods_cext.register_operation(_Dialect)
class RngBitGeneratorOp(_ods_ir.OpView):
  r"""
  Returns an `output` filled with uniform random data and an updated output
  state `output_state` given an initial state `initial_state` using the
  pseudorandom number generator algorithm `rng_algorithm`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#rng_bit_generator
  
  Example:
  ```mlir
  %output_state, %output = stablehlo.rng_bit_generator %initial_state, algorithm = THREE_FRY : (tensor<2xui64>) -> (tensor<2xui64>, tensor<2x2xui64>)
  ```
  """

  OPERATION_NAME = "stablehlo.rng_bit_generator"

  _ODS_REGIONS = (0, True)

  def __init__(self, output_state, output, rng_algorithm, initial_state, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(initial_state)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["rng_algorithm"] = (rng_algorithm if (
    isinstance(rng_algorithm, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_RngAlgorithmAttr')) else
      _ods_ir.AttrBuilder.get('StableHLO_RngAlgorithmAttr')(rng_algorithm, context=_ods_context))
    results = []
    results.append(output_state)
    results.append(output)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def initial_state(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rng_algorithm(self) -> _ods_ir.Attribute:
    return self.operation.attributes["rng_algorithm"]

  @rng_algorithm.setter
  def rng_algorithm(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["rng_algorithm"] = value

  @builtins.property
  def output_state(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

  @builtins.property
  def output(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[1]

def rng_bit_generator(output_state, output, rng_algorithm, initial_state, *, loc=None, ip=None) -> _ods_ir.OpResultList:
  return RngBitGeneratorOp(output_state=output_state, output=output, rng_algorithm=rng_algorithm, initial_state=initial_state, loc=loc, ip=ip).results
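
# Illustrative usage sketch (not emitted by mlir-tblgen): draws 2x2 random
# ui64 values with the THREE_FRY algorithm, mirroring the docstring example
# above. The algorithm attribute is parsed from what is assumed to be its
# textual form. Assumes an active Context with stablehlo registered, a
# Location, and an InsertionPoint.
def _example_build_rng_bit_generator(initial_state):
  ui64 = _ods_ir.IntegerType.get_unsigned(64)
  state_type = _ods_ir.RankedTensorType.get([2], ui64)
  output_type = _ods_ir.RankedTensorType.get([2, 2], ui64)
  algorithm = _ods_ir.Attribute.parse("#stablehlo<rng_algorithm THREE_FRY>")
  return rng_bit_generator(state_type, output_type, algorithm, initial_state)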

@_ods_cext.register_operation(_Dialect)
class RngOp(_ods_ir.OpView):
  r"""
  Generates random numbers using the `rng_distribution` algorithm and produces
  a `result` tensor of a given shape `shape`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#rng
  
  Example:
  ```mlir
  %result = stablehlo.rng %a, %b, %shape, distribution = NORMAL : (tensor<i32>, tensor<i32>, tensor<2xi64>) -> tensor<3x3xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.rng"

  _ODS_REGIONS = (0, True)

  def __init__(self, a, b, shape, rng_distribution, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(a)
    operands.append(b)
    operands.append(shape)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["rng_distribution"] = (rng_distribution if (
    isinstance(rng_distribution, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_RngDistributionAttr')) else
      _ods_ir.AttrBuilder.get('StableHLO_RngDistributionAttr')(rng_distribution, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def a(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def b(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def shape(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def rng_distribution(self) -> _ods_ir.Attribute:
    return self.operation.attributes["rng_distribution"]

  @rng_distribution.setter
  def rng_distribution(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["rng_distribution"] = value

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def rng(a, b, shape, rng_distribution, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return RngOp(a=a, b=b, shape=shape, rng_distribution=rng_distribution, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class RoundNearestEvenOp(_ods_ir.OpView):
  r"""
  Performs element-wise rounding towards the nearest integer, breaking ties
  towards the even integer, on the `operand` tensor and produces a `result`
  tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#round_nearest_even
  
  Example:
  ```mlir
  %result = stablehlo.round_nearest_even %operand : tensor<5xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.round_nearest_even"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def round_nearest_even(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return RoundNearestEvenOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class RoundOp(_ods_ir.OpView):
  r"""
  Performs element-wise rounding towards the nearest integer, breaking ties
  away from zero, on the `operand` tensor and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#round_nearest_afz
  
  Example:
  ```mlir
  %result = stablehlo.round_nearest_afz %operand : tensor<5xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.round_nearest_afz"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def round_nearest_afz(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return RoundOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class RsqrtOp(_ods_ir.OpView):
  r"""
  Performs element-wise reciprocal square root operation on `operand` tensor
  and produces a `result` tensor, implementing the `rSqrt` operation from the
  IEEE-754 specification.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#rsqrt
  
  Example:
  ```mlir
  %result = stablehlo.rsqrt %operand : tensor<2x2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.rsqrt"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def rsqrt(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return RsqrtOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ScatterOp(_ods_ir.OpView):
  r"""
  Produces `results` tensors which are equal to `inputs` tensors except that
  several slices specified by `scatter_indices` are updated with the values
  `updates` using `update_computation`.

  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#scatter
  
  Example:
  ```mlir
  %result = "stablehlo.scatter"(%input, %scatter_indices, %update) ({
    ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>):
      %0 = stablehlo.add %arg0, %arg1 : tensor<i64>
      stablehlo.return %0 : tensor<i64>
  }) {
    scatter_dimension_numbers = #stablehlo.scatter<
      update_window_dims = [3, 4],
      inserted_window_dims = [1],
      input_batching_dims = [0],
      scatter_indices_batching_dims = [1],
      scatter_dims_to_operand_dims = [2, 1],
      index_vector_dim = 3>,
    indices_are_sorted = false,
    unique_indices = false
  } : (tensor<2x3x4x2xi64>, tensor<2x2x3x2xi64>, tensor<2x2x3x2x2xi64>) -> tensor<2x3x4x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.scatter"

  _ODS_REGIONS = (1, True)

  def __init__(self, result, inputs, scatter_indices, updates, scatter_dimension_numbers, *, indices_are_sorted=None, unique_indices=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    operands.append(scatter_indices)
    operands.extend(_get_op_results_or_values(updates))
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["scatter_dimension_numbers"] = (scatter_dimension_numbers if (
    isinstance(scatter_dimension_numbers, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_ScatterDimensionNumbers')) else
      _ods_ir.AttrBuilder.get('StableHLO_ScatterDimensionNumbers')(scatter_dimension_numbers, context=_ods_context))
    if indices_are_sorted is not None: attributes["indices_are_sorted"] = (indices_are_sorted if (
        isinstance(indices_are_sorted, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('BoolAttr')) else
          _ods_ir.AttrBuilder.get('BoolAttr')(indices_are_sorted, context=_ods_context))
    if unique_indices is not None: attributes["unique_indices"] = (unique_indices if (
        isinstance(unique_indices, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('BoolAttr')) else
          _ods_ir.AttrBuilder.get('BoolAttr')(unique_indices, context=_ods_context))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    start, elements_per_group = _ods_equally_sized_accessor(self.operation.operands, 1, 2, 0, 0)
    return self.operation.operands[start:start + elements_per_group]

  @builtins.property
  def scatter_indices(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    start, elements_per_group = _ods_equally_sized_accessor(self.operation.operands, 1, 2, 0, 1)
    return self.operation.operands[start]

  @builtins.property
  def updates(self) -> _ods_ir.OpOperandList:
    start, elements_per_group = _ods_equally_sized_accessor(self.operation.operands, 1, 2, 1, 1)
    return self.operation.operands[start:start + elements_per_group]

  @builtins.property
  def scatter_dimension_numbers(self) -> _ods_ir.Attribute:
    return self.operation.attributes["scatter_dimension_numbers"]

  @scatter_dimension_numbers.setter
  def scatter_dimension_numbers(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["scatter_dimension_numbers"] = value

  @builtins.property
  def indices_are_sorted(self) -> _Optional[_ods_ir.BoolAttr]:
    if "indices_are_sorted" not in self.operation.attributes:
      return None
    return self.operation.attributes["indices_are_sorted"]

  @indices_are_sorted.setter
  def indices_are_sorted(self, value: _Optional[_ods_ir.BoolAttr]):
    if value is not None:
      self.operation.attributes["indices_are_sorted"] = value
    elif "indices_are_sorted" in self.operation.attributes:
      del self.operation.attributes["indices_are_sorted"]

  @indices_are_sorted.deleter
  def indices_are_sorted(self):
    del self.operation.attributes["indices_are_sorted"]

  @builtins.property
  def unique_indices(self) -> _Optional[_ods_ir.BoolAttr]:
    if "unique_indices" not in self.operation.attributes:
      return None
    return self.operation.attributes["unique_indices"]

  @unique_indices.setter
  def unique_indices(self, value: _Optional[_ods_ir.BoolAttr]):
    if value is not None:
      self.operation.attributes["unique_indices"] = value
    elif "unique_indices" in self.operation.attributes:
      del self.operation.attributes["unique_indices"]

  @unique_indices.deleter
  def unique_indices(self):
    del self.operation.attributes["unique_indices"]

  @builtins.property
  def update_computation(self) -> _ods_ir.Region:
    return self.regions[0]

def scatter(result, inputs, scatter_indices, updates, scatter_dimension_numbers, *, indices_are_sorted=None, unique_indices=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, ScatterOp]:
  op = ScatterOp(result=result, inputs=inputs, scatter_indices=scatter_indices, updates=updates, scatter_dimension_numbers=scatter_dimension_numbers, indices_are_sorted=indices_are_sorted, unique_indices=unique_indices, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
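
# Editorial usage sketch (hand-written, not generated): one way to drive the
# ScatterOp builder above. `input_val`, `indices_val` and `update_val` are
# hypothetical ir.Value handles with the shapes from the docstring example;
# the dimension-numbers attribute is parsed from the same textual form.
# Assumes an ambient ir.Context (stablehlo dialect loaded) and
# ir.InsertionPoint. The update computation region is created empty here and
# still has to be filled with a body ending in `stablehlo.return`.
def _example_scatter_usage(input_val, indices_val, update_val):
  i64 = _ods_ir.IntegerType.get_signless(64)
  result_type = _ods_ir.RankedTensorType.get([2, 3, 4, 2], i64)
  dnums = _ods_ir.Attribute.parse(
      "#stablehlo.scatter<update_window_dims = [3, 4], inserted_window_dims = [1], "
      "input_batching_dims = [0], scatter_indices_batching_dims = [1], "
      "scatter_dims_to_operand_dims = [2, 1], index_vector_dim = 3>")
  op = ScatterOp(
      result=[result_type],
      inputs=[input_val],
      scatter_indices=indices_val,
      updates=[update_val],
      scatter_dimension_numbers=dnums,
      indices_are_sorted=_ods_ir.BoolAttr.get(False),
      unique_indices=_ods_ir.BoolAttr.get(False))
  # Give the update computation a block with two scalar tensor arguments;
  # the block body itself is left to the caller.
  scalar = _ods_ir.RankedTensorType.get([], i64)
  op.update_computation.blocks.append(scalar, scalar)
  return op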

@_ods_cext.register_operation(_Dialect)
class SelectAndScatterOp(_ods_ir.OpView):
  r"""
  Scatters the values from the `source` tensor using `scatter` based on the
  outcome of `reduce_window` of the `input` tensor using `select` and produces
  a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#select_and_scatter
  
  Example:
  ```mlir
  %result = "stablehlo.select_and_scatter"(%operand, %source, %init_value) ({
    ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>):
      %0 = stablehlo.compare GE, %arg0, %arg1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
      stablehlo.return %0 : tensor<i1>
  }, {
    ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>):
      %0 = stablehlo.add %arg0, %arg1 : tensor<i64>
      stablehlo.return %0 : tensor<i64>
  }) {
    window_dimensions = array<i64: 3, 1>,
    window_strides = array<i64: 2, 1>,
    padding = dense<[[0, 1], [0, 0]]> : tensor<2x2xi64>
  } : (tensor<4x2xi64>, tensor<2x2xi64>, tensor<i64>) -> tensor<4x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.select_and_scatter"

  _ODS_REGIONS = (2, True)

  def __init__(self, result, operand, source, init_value, *, window_dimensions=None, window_strides=None, padding=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(source)
    operands.append(init_value)
    _ods_context = _ods_get_default_loc_context(loc)
    if window_dimensions is not None: attributes["window_dimensions"] = (window_dimensions if (
        isinstance(window_dimensions, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(window_dimensions, context=_ods_context))
    if window_strides is not None: attributes["window_strides"] = (window_strides if (
        isinstance(window_strides, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
          _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(window_strides, context=_ods_context))
    if padding is not None: attributes["padding"] = (padding if (
        isinstance(padding, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
          _ods_ir.AttrBuilder.get('I64ElementsAttr')(padding, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def source(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def init_value(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def window_dimensions(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "window_dimensions" not in self.operation.attributes:
      return None
    return self.operation.attributes["window_dimensions"]

  @window_dimensions.setter
  def window_dimensions(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["window_dimensions"] = value
    elif "window_dimensions" in self.operation.attributes:
      del self.operation.attributes["window_dimensions"]

  @window_dimensions.deleter
  def window_dimensions(self):
    del self.operation.attributes["window_dimensions"]

  @builtins.property
  def window_strides(self) -> _Optional[_ods_ir.DenseI64ArrayAttr]:
    if "window_strides" not in self.operation.attributes:
      return None
    return self.operation.attributes["window_strides"]

  @window_strides.setter
  def window_strides(self, value: _Optional[_ods_ir.DenseI64ArrayAttr]):
    if value is not None:
      self.operation.attributes["window_strides"] = value
    elif "window_strides" in self.operation.attributes:
      del self.operation.attributes["window_strides"]

  @window_strides.deleter
  def window_strides(self):
    del self.operation.attributes["window_strides"]

  @builtins.property
  def padding(self) -> _Optional[_ods_ir.DenseIntElementsAttr]:
    if "padding" not in self.operation.attributes:
      return None
    return self.operation.attributes["padding"]

  @padding.setter
  def padding(self, value: _Optional[_ods_ir.DenseIntElementsAttr]):
    if value is not None:
      self.operation.attributes["padding"] = value
    elif "padding" in self.operation.attributes:
      del self.operation.attributes["padding"]

  @padding.deleter
  def padding(self):
    del self.operation.attributes["padding"]

  @builtins.property
  def select(self) -> _ods_ir.Region:
    return self.regions[0]

  @builtins.property
  def scatter(self) -> _ods_ir.Region:
    return self.regions[1]

def select_and_scatter(result, operand, source, init_value, *, window_dimensions=None, window_strides=None, padding=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return SelectAndScatterOp(result=result, operand=operand, source=source, init_value=init_value, window_dimensions=window_dimensions, window_strides=window_strides, padding=padding, loc=loc, ip=ip).result
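
# Editorial usage sketch (hand-written, not generated): calling the
# SelectAndScatterOp builder above with the attribute values from its
# docstring example. `operand_val`, `source_val` and `init_val` are
# hypothetical ir.Value handles; an ambient ir.Context (stablehlo dialect
# loaded) and ir.InsertionPoint are assumed. Both regions are created empty
# and still need `select` / `scatter` bodies ending in `stablehlo.return`.
def _example_select_and_scatter_usage(operand_val, source_val, init_val):
  i64 = _ods_ir.IntegerType.get_signless(64)
  op = SelectAndScatterOp(
      result=_ods_ir.RankedTensorType.get([4, 2], i64),
      operand=operand_val,
      source=source_val,
      init_value=init_val,
      window_dimensions=_ods_ir.DenseI64ArrayAttr.get([3, 1]),
      window_strides=_ods_ir.DenseI64ArrayAttr.get([2, 1]),
      padding=_ods_ir.Attribute.parse("dense<[[0, 1], [0, 0]]> : tensor<2x2xi64>"))
  scalar = _ods_ir.RankedTensorType.get([], i64)
  op.select.blocks.append(scalar, scalar)
  op.scatter.blocks.append(scalar, scalar)
  return op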

@_ods_cext.register_operation(_Dialect)
class SelectOp(_ods_ir.OpView):
  r"""
  Produces a `result` tensor where each element is selected from `on_true` or
  `on_false` tensor based on the value of the corresponding element of `pred`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#select
  
  Example:
  ```mlir
  %result = stablehlo.select %pred, %on_true, %on_false : tensor<2x2xi1>, tensor<2x2xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.select"

  _ODS_REGIONS = (0, True)

  def __init__(self, pred, on_true, on_false, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(pred)
    operands.append(on_true)
    operands.append(on_false)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def pred(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def on_true(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def on_false(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[2]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def select(pred, on_true, on_false, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return SelectOp(pred=pred, on_true=on_true, on_false=on_false, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class SendOp(_ods_ir.OpView):
  r"""
  Sends `inputs` to a channel `channel_id` and produces a `result` token.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#send
  
  Example:
  ```mlir
  %result = "stablehlo.send"(%operand, %token) {
    channel_handle = #stablehlo.channel_handle<handle = 0, type = 1>,
    is_host_transfer = false,
    source_target_pairs = dense<[[0, 1], [1, 2]]> : tensor<2x2xi64>
  } : (tensor<2x2xi64>, !stablehlo.token) -> !stablehlo.token
  ```
  """

  OPERATION_NAME = "stablehlo.send"

  _ODS_REGIONS = (0, True)

  def __init__(self, inputs, token, channel_handle, *, is_host_transfer=None, source_target_pairs=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    operands.append(token)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["channel_handle"] = (channel_handle if (
    isinstance(channel_handle, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_ChannelHandle')) else
      _ods_ir.AttrBuilder.get('StableHLO_ChannelHandle')(channel_handle, context=_ods_context))
    if is_host_transfer is not None: attributes["is_host_transfer"] = (is_host_transfer if (
        isinstance(is_host_transfer, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('BoolAttr')) else
          _ods_ir.AttrBuilder.get('BoolAttr')(is_host_transfer, context=_ods_context))
    if source_target_pairs is not None: attributes["source_target_pairs"] = (source_target_pairs if (
        isinstance(source_target_pairs, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('I64ElementsAttr')) else
          _ods_ir.AttrBuilder.get('I64ElementsAttr')(source_target_pairs, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 2 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def token(self) -> _ods_ir.Value:
    _ods_variadic_group_length = len(self.operation.operands) - 2 + 1
    return self.operation.operands[1 + _ods_variadic_group_length - 1]

  @builtins.property
  def channel_handle(self) -> _ods_ir.Attribute:
    return self.operation.attributes["channel_handle"]

  @channel_handle.setter
  def channel_handle(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["channel_handle"] = value

  @builtins.property
  def is_host_transfer(self) -> _Optional[_ods_ir.BoolAttr]:
    if "is_host_transfer" not in self.operation.attributes:
      return None
    return self.operation.attributes["is_host_transfer"]

  @is_host_transfer.setter
  def is_host_transfer(self, value: _Optional[_ods_ir.BoolAttr]):
    if value is not None:
      self.operation.attributes["is_host_transfer"] = value
    elif "is_host_transfer" in self.operation.attributes:
      del self.operation.attributes["is_host_transfer"]

  @is_host_transfer.deleter
  def is_host_transfer(self):
    del self.operation.attributes["is_host_transfer"]

  @builtins.property
  def source_target_pairs(self) -> _Optional[_ods_ir.DenseIntElementsAttr]:
    if "source_target_pairs" not in self.operation.attributes:
      return None
    return self.operation.attributes["source_target_pairs"]

  @source_target_pairs.setter
  def source_target_pairs(self, value: _Optional[_ods_ir.DenseIntElementsAttr]):
    if value is not None:
      self.operation.attributes["source_target_pairs"] = value
    elif "source_target_pairs" in self.operation.attributes:
      del self.operation.attributes["source_target_pairs"]

  @source_target_pairs.deleter
  def source_target_pairs(self):
    del self.operation.attributes["source_target_pairs"]

def send(inputs, token, channel_handle, *, is_host_transfer=None, source_target_pairs=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return SendOp(inputs=inputs, token=token, channel_handle=channel_handle, is_host_transfer=is_host_transfer, source_target_pairs=source_target_pairs, results=results, loc=loc, ip=ip).result
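
# Editorial usage sketch (hand-written, not generated): calling the `send`
# wrapper above. `data_val` is a hypothetical tensor-typed ir.Value and
# `token_val` an existing !stablehlo.token value; the channel handle is parsed
# from the textual form used in the docstring example. An ambient ir.Context
# (stablehlo dialect loaded) and ir.InsertionPoint are assumed, and the token
# result type is passed explicitly.
def _example_send_usage(data_val, token_val):
  channel = _ods_ir.Attribute.parse(
      "#stablehlo.channel_handle<handle = 0, type = 1>")
  token_type = _ods_ir.Type.parse("!stablehlo.token")
  return send([data_val], token_val, channel,
              is_host_transfer=_ods_ir.BoolAttr.get(False),
              results=[token_type])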

@_ods_cext.register_operation(_Dialect)
class SetDimensionSizeOp(_ods_ir.OpView):
  r"""
  This operation is a work in progress, so it is not yet included in
  the StableHLO specification: https://github.com/openxla/stablehlo/issues/8.
  
  Informally, this operation does the same thing as XLA's SetDimensionSize:
  https://www.tensorflow.org/xla/operation_semantics#setdimensionsize
  
  Example:
  ```mlir
  %0 = stablehlo.set_dimension_size %arg0, %arg1, dim = 1 : (tensor<4x2xf32>, tensor<i32>) -> tensor<4x2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.set_dimension_size"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, size, dimension, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(size)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dimension"] = (dimension if (
    isinstance(dimension, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(dimension, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def size(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def dimension(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["dimension"]

  @dimension.setter
  def dimension(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dimension"] = value

def set_dimension_size(operand, size, dimension, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return SetDimensionSizeOp(operand=operand, size=size, dimension=dimension, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ShiftLeftOp(_ods_ir.OpView):
  r"""
  Performs element-wise left-shift operation on the `lhs` tensor by `rhs`
  number of bits and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#shift_left
  
  Example:
  ```mlir
  %result = stablehlo.shift_left %lhs, %rhs : tensor<3xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.shift_left"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def shift_left(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ShiftLeftOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ShiftRightArithmeticOp(_ods_ir.OpView):
  r"""
  Performs element-wise arithmetic right-shift operation on the `lhs` tensor
  by `rhs` number of bits and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#shift_right_arithmetic
  
  Example:
  ```mlir
  %result = stablehlo.shift_right_arithmetic %lhs, %rhs : tensor<3xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.shift_right_arithmetic"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def shift_right_arithmetic(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ShiftRightArithmeticOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class ShiftRightLogicalOp(_ods_ir.OpView):
  r"""
  Performs element-wise logical right-shift operation on the `lhs` tensor by
  `rhs` number of bits and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#shift_right_logical
  
  Example:
  ```mlir
  %result = stablehlo.shift_right_logical %lhs, %rhs : tensor<3xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.shift_right_logical"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def shift_right_logical(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return ShiftRightLogicalOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class SignOp(_ods_ir.OpView):
  r"""
  Returns the sign of the `operand` element-wise and produces a `result`
  tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#sign
  
  Example:
  ```mlir
  %result = stablehlo.sign %operand : tensor<5xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.sign"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def sign(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return SignOp(operand=operand, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class SineOp(_ods_ir.OpView):
  r"""
  Performs element-wise sine operation on `operand` tensor and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#sine
  
  Example:
  ```mlir
  %result = stablehlo.sine %operand : tensor<2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.sine"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def sine(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return SineOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class SliceOp(_ods_ir.OpView):
  r"""
  Extracts a slice from the `operand` using statically-computed starting
  indices and produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#slice
  
  Example:
  ```mlir
  %result = stablehlo.slice %operand [1:3, 4:8:2]
     : (tensor<3x8xi64>) -> tensor<2x2xi64>
  
  // Same in generic form: the `1:3` above is mapped to the first entry in
  // `start_indices` and `limit_indices`, while `strides` is implicitly 1.
  // The `4:8:2` above is parsed into the second entry of `start_indices`,
  // `limit_indices` and `strides` respectively.
  %result = "stablehlo.slice" (%operand) {
    start_indices = array<i64: 1, 4>,
    limit_indices = array<i64: 3, 8>,
    strides = array<i64: 1, 2>
  } : (tensor<3x8xi64>) -> tensor<2x2xi64>
  ```
  """

  OPERATION_NAME = "stablehlo.slice"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, start_indices, limit_indices, strides, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["start_indices"] = (start_indices if (
    isinstance(start_indices, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(start_indices, context=_ods_context))
    attributes["limit_indices"] = (limit_indices if (
    isinstance(limit_indices, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(limit_indices, context=_ods_context))
    attributes["strides"] = (strides if (
    isinstance(strides, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(strides, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def start_indices(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["start_indices"]

  @start_indices.setter
  def start_indices(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["start_indices"] = value

  @builtins.property
  def limit_indices(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["limit_indices"]

  @limit_indices.setter
  def limit_indices(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["limit_indices"] = value

  @builtins.property
  def strides(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["strides"]

  @strides.setter
  def strides(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["strides"] = value

def slice(operand, start_indices, limit_indices, strides, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return SliceOp(operand=operand, start_indices=start_indices, limit_indices=limit_indices, strides=strides, results=results, loc=loc, ip=ip).result
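
# Editorial usage sketch (hand-written, not generated): the `slice` wrapper
# above with the indices from the docstring example. `operand_val` is a
# hypothetical ir.Value of type tensor<3x8xi64>; the result type is spelled
# out explicitly. An ambient ir.Context and ir.InsertionPoint are assumed.
def _example_slice_usage(operand_val):
  i64 = _ods_ir.IntegerType.get_signless(64)
  return slice(
      operand_val,
      start_indices=_ods_ir.DenseI64ArrayAttr.get([1, 4]),
      limit_indices=_ods_ir.DenseI64ArrayAttr.get([3, 8]),
      strides=_ods_ir.DenseI64ArrayAttr.get([1, 2]),
      results=[_ods_ir.RankedTensorType.get([2, 2], i64)])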

@_ods_cext.register_operation(_Dialect)
class SortOp(_ods_ir.OpView):
  r"""
  Sorts a variadic number of tensors in `inputs` together, according to a
  custom `comparator`, along the given `dimension` and produces a variadic
  number of tensors as `results`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#sort
  
  Example:
  ```mlir
  %result0, %result1 = "stablehlo.sort"(%input0, %input1) ({
    ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>, %arg2: tensor<i64>, %arg3: tensor<i64>):
      %predicate = stablehlo.compare GT, %arg0, %arg1 : (tensor<i64>, tensor<i64>) -> tensor<i1>
      stablehlo.return %predicate : tensor<i1>
  }) {
    dimension = 0 : i64,
    is_stable = true
  } : (tensor<2x3xi64>, tensor<2x3xi64>) -> (tensor<2x3xi64>, tensor<2x3xi64>)
  """

  OPERATION_NAME = "stablehlo.sort"

  _ODS_REGIONS = (1, True)

  def __init__(self, result, inputs, *, dimension=None, is_stable=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(inputs))
    _ods_context = _ods_get_default_loc_context(loc)
    if dimension is not None: attributes["dimension"] = (dimension if (
        isinstance(dimension, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('I64Attr')) else
          _ods_ir.AttrBuilder.get('I64Attr')(dimension, context=_ods_context))
    if is_stable is not None: attributes["is_stable"] = (is_stable if (
        isinstance(is_stable, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('BoolAttr')) else
          _ods_ir.AttrBuilder.get('BoolAttr')(is_stable, context=_ods_context))
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def inputs(self) -> _ods_ir.OpOperandList:
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def dimension(self) -> _Optional[_ods_ir.IntegerAttr]:
    if "dimension" not in self.operation.attributes:
      return None
    return self.operation.attributes["dimension"]

  @dimension.setter
  def dimension(self, value: _Optional[_ods_ir.IntegerAttr]):
    if value is not None:
      self.operation.attributes["dimension"] = value
    elif "dimension" in self.operation.attributes:
      del self.operation.attributes["dimension"]

  @dimension.deleter
  def dimension(self):
    del self.operation.attributes["dimension"]

  @builtins.property
  def is_stable(self) -> _Optional[_ods_ir.BoolAttr]:
    if "is_stable" not in self.operation.attributes:
      return None
    return self.operation.attributes["is_stable"]

  @is_stable.setter
  def is_stable(self, value: _Optional[_ods_ir.BoolAttr]):
    if value is not None:
      self.operation.attributes["is_stable"] = value
    elif "is_stable" in self.operation.attributes:
      del self.operation.attributes["is_stable"]

  @is_stable.deleter
  def is_stable(self):
    del self.operation.attributes["is_stable"]

  @builtins.property
  def comparator(self) -> _ods_ir.Region:
    return self.regions[0]

def sort(result, inputs, *, dimension=None, is_stable=None, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, SortOp]:
  op = SortOp(result=result, inputs=inputs, dimension=dimension, is_stable=is_stable, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
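
# Editorial usage sketch (hand-written, not generated): the two-step pattern
# for region-carrying ops, using SortOp above. `key_val` and `value_val` are
# hypothetical ir.Value handles of type tensor<2x3xi64>; result types are
# passed explicitly. The comparator block takes one pair of scalar tensors per
# sorted input and still has to be terminated with a `stablehlo.return` of an
# i1 tensor (body not shown). Ambient ir.Context / ir.InsertionPoint assumed.
def _example_sort_usage(key_val, value_val):
  i64 = _ods_ir.IntegerType.get_signless(64)
  in_type = _ods_ir.RankedTensorType.get([2, 3], i64)
  op = SortOp(
      result=[in_type, in_type],
      inputs=[key_val, value_val],
      dimension=_ods_ir.IntegerAttr.get(i64, 0),
      is_stable=_ods_ir.BoolAttr.get(True))
  scalar = _ods_ir.RankedTensorType.get([], i64)
  op.comparator.blocks.append(scalar, scalar, scalar, scalar)
  return op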

@_ods_cext.register_operation(_Dialect)
class SqrtOp(_ods_ir.OpView):
  r"""
  Performs element-wise square root operation on `operand` tensor and produces
  a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#sqrt
  
  Example:
  ```mlir
  %result = stablehlo.sqrt %operand : tensor<2x2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.sqrt"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def sqrt(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return SqrtOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class SubtractOp(_ods_ir.OpView):
  r"""
  Performs element-wise subtraction of two tensors `lhs` and `rhs` and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#subtract
  
  Example:
  ```mlir
  %result = stablehlo.subtract %lhs, %rhs : tensor<2xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.subtract"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def subtract(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return SubtractOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class TanOp(_ods_ir.OpView):
  r"""
  Performs element-wise tangent operation on `operand` tensor and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#tan
  
  Example:
  ```mlir
  %result = stablehlo.tan %operand : tensor<2x2xf64>
  ```
  """

  OPERATION_NAME = "stablehlo.tan"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def tan(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return TanOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class TanhOp(_ods_ir.OpView):
  r"""
  Performs element-wise hyperbolic tangent operation on `operand` tensor and
  produces a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#tanh
  
  Example:
  ```mlir
  %result = stablehlo.tanh %operand : tensor<2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.tanh"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, result_accuracy=None, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    if result_accuracy is not None: attributes["result_accuracy"] = (result_accuracy if (
        isinstance(result_accuracy, _ods_ir.Attribute) or
        not _ods_ir.AttrBuilder.contains('StableHLO_ResultAccuracyAttr')) else
          _ods_ir.AttrBuilder.get('StableHLO_ResultAccuracyAttr')(result_accuracy, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result_accuracy(self) -> _Optional[_ods_ir.Attribute]:
    if "result_accuracy" not in self.operation.attributes:
      return None
    return self.operation.attributes["result_accuracy"]

  @result_accuracy.setter
  def result_accuracy(self, value: _Optional[_ods_ir.Attribute]):
    if value is not None:
      self.operation.attributes["result_accuracy"] = value
    elif "result_accuracy" in self.operation.attributes:
      del self.operation.attributes["result_accuracy"]

  @result_accuracy.deleter
  def result_accuracy(self):
    del self.operation.attributes["result_accuracy"]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def tanh(operand, *, result_accuracy=None, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return TanhOp(operand=operand, result_accuracy=result_accuracy, results=results, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class TorchIndexSelectOp(_ods_ir.OpView):
  r"""
  This operation is on its way out of StableHLO, so it is not included in
  the StableHLO specification: https://github.com/openxla/stablehlo/issues/3.
  
  Informally, this operation does the same thing as PyTorch's index_select,
  augmented with support for batch dimensions:
  https://pytorch.org/docs/stable/generated/torch.index_select.html.
  
  The `batch_dims` attribute specifies the number of major batch dimensions
  (0 or more) that act like a multidimensional loop over both the operand and
  the index.
  
  Example:
  ```mlir
  %result = "stablehlo.torch_index_select"(%operand, %index) {
    dim = 2 : i64,
    batch_dims = 1 : i64
  } : (tensor<8x128x3072x64xf32>, tensor<8x16x1024xi32>) -> tensor<8x128x16x1024x64xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.torch_index_select"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, index, dim, batch_dims, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    operands.append(index)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["dim"] = (dim if (
    isinstance(dim, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(dim, context=_ods_context))
    attributes["batch_dims"] = (batch_dims if (
    isinstance(batch_dims, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('I64Attr')) else
      _ods_ir.AttrBuilder.get('I64Attr')(batch_dims, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def index(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def dim(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["dim"]

  @dim.setter
  def dim(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["dim"] = value

  @builtins.property
  def batch_dims(self) -> _ods_ir.IntegerAttr:
    return self.operation.attributes["batch_dims"]

  @batch_dims.setter
  def batch_dims(self, value: _ods_ir.IntegerAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["batch_dims"] = value

def torch_index_select(result, operand, index, dim, batch_dims, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return TorchIndexSelectOp(result=result, operand=operand, index=index, dim=dim, batch_dims=batch_dims, loc=loc, ip=ip).result

@_ods_cext.register_operation(_Dialect)
class TransposeOp(_ods_ir.OpView):
  r"""
  Permutes the dimensions of `operand` tensor using `permutation` and produces
  a `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#transpose
  
  Example:
  ```mlir
  %0 = stablehlo.transpose %arg0, dims = [2, 1, 0] : (tensor<1x2x3xi32>) -> tensor<3x2x1xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.transpose"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, permutation, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["permutation"] = (permutation if (
    isinstance(permutation, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('DenseI64ArrayAttr')) else
      _ods_ir.AttrBuilder.get('DenseI64ArrayAttr')(permutation, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def permutation(self) -> _ods_ir.DenseI64ArrayAttr:
    return self.operation.attributes["permutation"]

  @permutation.setter
  def permutation(self, value: _ods_ir.DenseI64ArrayAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["permutation"] = value

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def transpose(operand, permutation, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return TransposeOp(operand=operand, permutation=permutation, results=results, loc=loc, ip=ip).result
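
# Editorial usage sketch (hand-written, not generated): the `transpose`
# wrapper above with the permutation from the docstring example.
# `operand_val` is a hypothetical ir.Value of type tensor<1x2x3xi32>; the
# result type is given explicitly. Ambient ir.Context / ir.InsertionPoint
# assumed.
def _example_transpose_usage(operand_val):
  i32 = _ods_ir.IntegerType.get_signless(32)
  return transpose(
      operand_val,
      permutation=_ods_ir.DenseI64ArrayAttr.get([2, 1, 0]),
      results=[_ods_ir.RankedTensorType.get([3, 2, 1], i32)])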

@_ods_cext.register_operation(_Dialect)
class TriangularSolveOp(_ods_ir.OpView):
  r"""
  Solves batches of systems of linear equations with lower or upper triangular
  coefficient matrices.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#triangular_solve
  
  Example:
  ```mlir
  %result = "stablehlo.triangular_solve"(%a, %b) {
    left_side = true,
    lower = true,
    unit_diagonal = false,
    transpose_a = #stablehlo<transpose NO_TRANSPOSE>
  } : (tensor<3x3xf32>, tensor<3x3xf32>) -> tensor<3x3xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.triangular_solve"

  _ODS_REGIONS = (0, True)

  def __init__(self, a, b, left_side, lower, unit_diagonal, transpose_a, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(a)
    operands.append(b)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["left_side"] = (left_side if (
    isinstance(left_side, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('BoolAttr')) else
      _ods_ir.AttrBuilder.get('BoolAttr')(left_side, context=_ods_context))
    attributes["lower"] = (lower if (
    isinstance(lower, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('BoolAttr')) else
      _ods_ir.AttrBuilder.get('BoolAttr')(lower, context=_ods_context))
    attributes["unit_diagonal"] = (unit_diagonal if (
    isinstance(unit_diagonal, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('BoolAttr')) else
      _ods_ir.AttrBuilder.get('BoolAttr')(unit_diagonal, context=_ods_context))
    attributes["transpose_a"] = (transpose_a if (
    isinstance(transpose_a, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StableHLO_TransposeAttr')) else
      _ods_ir.AttrBuilder.get('StableHLO_TransposeAttr')(transpose_a, context=_ods_context))
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def a(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def b(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def left_side(self) -> _ods_ir.BoolAttr:
    return self.operation.attributes["left_side"]

  @left_side.setter
  def left_side(self, value: _ods_ir.BoolAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["left_side"] = value

  @builtins.property
  def lower(self) -> _ods_ir.BoolAttr:
    return self.operation.attributes["lower"]

  @lower.setter
  def lower(self, value: _ods_ir.BoolAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["lower"] = value

  @builtins.property
  def unit_diagonal(self) -> _ods_ir.BoolAttr:
    return self.operation.attributes["unit_diagonal"]

  @unit_diagonal.setter
  def unit_diagonal(self, value: _ods_ir.BoolAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["unit_diagonal"] = value

  @builtins.property
  def transpose_a(self) -> _ods_ir.Attribute:
    return self.operation.attributes["transpose_a"]

  @transpose_a.setter
  def transpose_a(self, value: _ods_ir.Attribute):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["transpose_a"] = value

def triangular_solve(a, b, left_side, lower, unit_diagonal, transpose_a, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return TriangularSolveOp(a=a, b=b, left_side=left_side, lower=lower, unit_diagonal=unit_diagonal, transpose_a=transpose_a, results=results, loc=loc, ip=ip).result
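
# Illustrative usage sketch (hand-written, not part of the generated API):
# `_example_triangular_solve`, `a_value`, and `b_value` are hypothetical names,
# and an active MLIR context and insertion point are assumed. The boolean
# attributes may be passed as plain Python bools (handled by the BoolAttr
# builder), while `transpose_a` needs a #stablehlo<transpose ...> attribute,
# shown here parsed from its textual form, which assumes the stablehlo dialect
# is loaded in the context.
def _example_triangular_solve(a_value, b_value):
  no_transpose = _ods_ir.Attribute.parse("#stablehlo<transpose NO_TRANSPOSE>")
  return triangular_solve(a_value, b_value,
                          left_side=True, lower=True, unit_diagonal=False,
                          transpose_a=no_transpose)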

@_ods_cext.register_operation(_Dialect)
class TupleOp(_ods_ir.OpView):
  r"""
  Produces a `result` tuple from values `val`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#tuple
  
  Example:
  ```mlir
  %result = stablehlo.tuple %val0, %val1 : tuple<tensor<2xf64>, tuple<tensor<i64>>>
  ```
  """

  OPERATION_NAME = "stablehlo.tuple"

  _ODS_REGIONS = (0, True)

  def __init__(self, val, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(val))
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def val(self) -> _ods_ir.OpOperandList:
    # `val` is the only operand group and it is variadic, so it spans all operands.
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def result(self) -> _ods_ir.OpResult:
    return self.operation.results[0]

def tuple(val, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return TupleOp(val=val, results=results, loc=loc, ip=ip).result
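
# Illustrative usage sketch (hand-written): `_example_tuple` and its value
# arguments are hypothetical, and an active MLIR context and insertion point
# are assumed. Note that this module-level helper shadows Python's builtin
# `tuple` when imported unqualified, so callers may prefer `TupleOp` or a
# module-qualified call. The default `results=None` presumably lets the tuple
# type be derived from the operand types.
def _example_tuple(value0, value1):
  return tuple([value0, value1])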

@_ods_cext.register_operation(_Dialect)
class UnaryEinsumOp(_ods_ir.OpView):
  r"""
  This operation is on its way out of StableHLO, so it is not included in
  the StableHLO specification: https://github.com/openxla/stablehlo/issues/3.
  
  Informally, this operation does the same thing as TF's einsum:
  https://www.tensorflow.org/api_docs/python/tf/einsum
  
  Example:
  ```mlir
  %result = "stablehlo.unary_einsum"(%operand) {
    einsum_config = "ab->a"
  } : (tensor<4x16xf32>) -> tensor<4xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.unary_einsum"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, einsum_config, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    attributes["einsum_config"] = (einsum_config if (
    isinstance(einsum_config, _ods_ir.Attribute) or
    not _ods_ir.AttrBuilder.contains('StrAttr')) else
      _ods_ir.AttrBuilder.get('StrAttr')(einsum_config, context=_ods_context))
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def einsum_config(self) -> _ods_ir.StringAttr:
    return self.operation.attributes["einsum_config"]

  @einsum_config.setter
  def einsum_config(self, value: _ods_ir.StringAttr):
    if value is None:
      raise ValueError("'None' not allowed as value for mandatory attributes")
    self.operation.attributes["einsum_config"] = value

def unary_einsum(result, operand, einsum_config, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return UnaryEinsumOp(result=result, operand=operand, einsum_config=einsum_config, loc=loc, ip=ip).result
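
# Illustrative usage sketch (hand-written): this op takes its result type as
# the first positional argument, and `einsum_config` may be a plain Python
# string (handled by the StrAttr builder). The value and type below are
# hypothetical; an active MLIR context and insertion point are assumed.
def _example_unary_einsum(operand_value):
  f32 = _ods_ir.F32Type.get()
  result_type = _ods_ir.RankedTensorType.get([4], f32)
  # Reduce away the second axis of a tensor<4x16xf32>, mirroring the MLIR
  # example in the docstring above.
  return unary_einsum(result_type, operand_value, "ab->a")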

@_ods_cext.register_operation(_Dialect)
class UniformDequantizeOp(_ods_ir.OpView):
  r"""
  Performs element-wise conversion of quantized tensor `operand` to a
  floating-point tensor `result` according to the quantization parameters
  defined by the `operand` type.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#uniform_dequantize
  
  Example:
  ```mlir
  %result = stablehlo.uniform_dequantize %operand : (tensor<2x!quant.uniform<i8:f32:0, {0.1:-30,0.5:-20}>>) -> tensor<2xf32>
  ```
  """

  OPERATION_NAME = "stablehlo.uniform_dequantize"

  _ODS_REGIONS = (0, True)

  def __init__(self, operand, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def uniform_dequantize(operand, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return UniformDequantizeOp(operand=operand, results=results, loc=loc, ip=ip).result
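
# Illustrative usage sketch (hand-written): `_example_uniform_dequantize` and
# `quantized_value` are hypothetical names, and an active MLIR context and
# insertion point are assumed. The operand must already carry a !quant.uniform
# element type; with the default `results=None` the floating-point result type
# is presumably inferred from the operand type.
def _example_uniform_dequantize(quantized_value):
  return uniform_dequantize(quantized_value)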

@_ods_cext.register_operation(_Dialect)
class UniformQuantizeOp(_ods_ir.OpView):
  r"""
  Performs element-wise conversion of floating-point tensor or quantized
  tensor `operand` to a quantized tensor `result` according to the
  quantization parameters defined by the `result` type.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#uniform_quantize
  
  Example:
  ```mlir
  %result = stablehlo.uniform_quantize %operand : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32:0, {0.1:-30,0.5:-20}>>
  ```
  """

  OPERATION_NAME = "stablehlo.uniform_quantize"

  _ODS_REGIONS = (0, True)

  def __init__(self, result, operand, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(operand)
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.append(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def uniform_quantize(result, operand, *, loc=None, ip=None) -> _ods_ir.OpResult:
  return UniformQuantizeOp(result=result, operand=operand, loc=loc, ip=ip).result
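
# Illustrative usage sketch (hand-written): unlike the dequantize helper above,
# the quantized result type is the first positional argument. It is parsed from
# its textual form here (mirroring the docstring example), which assumes the
# quant dialect is loaded in the active MLIR context; names are hypothetical.
def _example_uniform_quantize(float_value):
  quantized_type = _ods_ir.Type.parse(
      "tensor<2x!quant.uniform<i8:f32:0, {0.1:-30,0.5:-20}>>")
  return uniform_quantize(quantized_type, float_value)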

@_ods_cext.register_operation(_Dialect)
class WhileOp(_ods_ir.OpView):
  r"""
  Produces the output from executing the `body` function 0 or more times while
  the `cond` function outputs `true`.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#while
  
  Example:
  ```mlir
  %results0, %results1 = stablehlo.while(%arg0 = %init_i, %arg1 = %init_sum) : tensor<i64>, tensor<i64>
  cond {
    %cond = stablehlo.compare LT, %arg0, %ten : (tensor<i64>, tensor<i64>) -> tensor<i1>
    stablehlo.return %cond : tensor<i1>
  } do {
    %new_sum = stablehlo.add %arg1, %one : tensor<i64>
    %new_i = stablehlo.add %arg0, %one : tensor<i64>
    stablehlo.return %new_i, %new_sum : tensor<i64>, tensor<i64>
  }
  ```
  """

  OPERATION_NAME = "stablehlo.while"

  _ODS_REGIONS = (2, True)

  def __init__(self, result, operand, *, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.extend(_get_op_results_or_values(operand))
    _ods_context = _ods_get_default_loc_context(loc)
    results = []
    results.extend(result)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def operand(self) -> _ods_ir.OpOperandList:
    # `operand` is the only operand group and it is variadic, so it spans all operands.
    _ods_variadic_group_length = len(self.operation.operands) - 1 + 1
    return self.operation.operands[0:0 + _ods_variadic_group_length]

  @builtins.property
  def cond(self) -> _ods_ir.Region:
    return self.regions[0]

  @builtins.property
  def body(self) -> _ods_ir.Region:
    return self.regions[1]

def while_(result, operand, *, loc=None, ip=None) -> _Union[_ods_ir.OpResult, _ods_ir.OpResultList, WhileOp]:
  op = WhileOp(result=result, operand=operand, loc=loc, ip=ip); results = op.results
  return results if len(results) > 1 else (results[0] if len(results) == 1 else op)
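
# Illustrative usage sketch (hand-written): a `stablehlo.while` needs its two
# regions populated after construction. The block bodies (the comparison, the
# `stablehlo.return` terminators) are elided here, so the resulting op would
# not yet verify; the names below are hypothetical, and an active MLIR context
# and insertion point are assumed.
def _example_while(init_value):
  w = WhileOp([init_value.type], [init_value])
  cond_block = w.cond.blocks.append(init_value.type)
  body_block = w.body.blocks.append(init_value.type)
  with _ods_ir.InsertionPoint(cond_block):
    pass  # ...build the loop condition and a stablehlo.return of a tensor<i1>
  with _ods_ir.InsertionPoint(body_block):
    pass  # ...build the next iteration values and a stablehlo.return
  return w.results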

@_ods_cext.register_operation(_Dialect)
class XorOp(_ods_ir.OpView):
  r"""
  Performs element-wise XOR of two tensors `lhs` and `rhs` and produces a
  `result` tensor.
  
  See:
  https://github.com/openxla/stablehlo/blob/main/docs/spec.md#xor
  
  Example:
  ```mlir
  %result = stablehlo.xor %lhs, %rhs : tensor<2xi32>
  ```
  """

  OPERATION_NAME = "stablehlo.xor"

  _ODS_REGIONS = (0, True)

  def __init__(self, lhs, rhs, *, results=None, loc=None, ip=None):
    operands = []
    attributes = {}
    regions = None
    operands.append(lhs)
    operands.append(rhs)
    _ods_context = _ods_get_default_loc_context(loc)
    _ods_successors = None
    super().__init__(self.OPERATION_NAME, self._ODS_REGIONS, self._ODS_OPERAND_SEGMENTS, self._ODS_RESULT_SEGMENTS, attributes=attributes, results=results, operands=operands, successors=_ods_successors, regions=regions, loc=loc, ip=ip)

  @builtins.property
  def lhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[0]

  @builtins.property
  def rhs(self) -> _ods_ir.Value[_ods_ir.RankedTensorType]:
    return self.operation.operands[1]

  @builtins.property
  def result(self) -> _ods_ir.OpResult[_ods_ir.RankedTensorType]:
    return self.operation.results[0]

def xor(lhs, rhs, *, results=None, loc=None, ip=None) -> _ods_ir.OpResult:
  return XorOp(lhs=lhs, rhs=rhs, results=results, loc=loc, ip=ip).result
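
# Illustrative usage sketch (hand-written): element-wise ops such as this one
# take no attributes, so a call is just the two operand values; with the
# default `results=None` the result type is presumably inferred from the
# operands. Names are hypothetical, and an active MLIR context and insertion
# point are assumed.
def _example_xor(lhs_value, rhs_value):
  # Equivalent to XorOp(lhs_value, rhs_value).result.
  return xor(lhs_value, rhs_value)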
