
Commit 0545186

committed: format
1 parent 15e92d0 commit 0545186

6 files changed (+75, -57 lines)


mlir/lib/Bindings/Python/Globals.h

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ class PyGlobals {
                            pybind11::object pyClass);
 
   /// Adds a concrete implementation operation class.
-  /// Raises an exception if the mapping already exists.
+  /// Raises an exception if the mapping already exists and replace == false.
   /// This is intended to be called by implementation code.
   void registerOperationImpl(const std::string &operationName,
                              pybind11::object pyClass, bool replace = false);
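
The updated comment is the whole behavioral contract of the new `replace` flag. As a pure-Python illustration of that contract (this is not the actual PyGlobals implementation, just a sketch of the semantics), a registry behaving this way looks roughly like:

# Illustration only: mimics the registerOperationImpl contract described above.
# A duplicate registration for the same operation name raises unless replace=True.
class OperationRegistry:
    def __init__(self):
        self._op_classes = {}

    def register_operation_impl(self, operation_name, py_class, replace=False):
        if operation_name in self._op_classes and not replace:
            raise RuntimeError(
                f"operation class for '{operation_name}' is already registered"
            )
        self._op_classes[operation_name] = py_class


registry = OperationRegistry()
registry.register_operation_impl("arith.constant", object)                # first registration
registry.register_operation_impl("arith.constant", object, replace=True)  # explicit override
# registry.register_operation_impl("arith.constant", object)              # would raise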

mlir/python/mlir/dialects/arith.py

Lines changed: 5 additions & 1 deletion
@@ -8,7 +8,10 @@
 
 try:
     from ..ir import *
-    from ._ods_common import get_default_loc_context as _get_default_loc_context, _cext as _ods_cext
+    from ._ods_common import (
+        get_default_loc_context as _get_default_loc_context,
+        _cext as _ods_cext,
+    )
 
     from typing import Any, List, Union
 except ImportError as e:
@@ -34,6 +37,7 @@ def _is_integer_like_type(type: Type):
 def _is_float_type(type: Type):
     return _is_any_of(type, [BF16Type, F16Type, F32Type, F64Type])
 
+
 @_ods_cext.register_operation(_Dialect, replace=True)
 class ConstantOp(ConstantOp):
     """Specialization for the constant op class."""

mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py

Lines changed: 58 additions & 49 deletions
@@ -296,35 +296,39 @@ def quantized_matmul(
 
 
 @linalg_structured_op
-def matmul_transpose_a(A=TensorDef(T1, S.K, S.N),
-                       B=TensorDef(T2, S.K, S.M),
-                       C=TensorDef(U, S.M, S.N, output=True),
-                       cast=TypeFnAttrDef(default=TypeFn.cast_signed)):
-  """Performs a matrix multiplication of two 2D inputs with lhs operand
-  transposed.
+def matmul_transpose_a(
+    A=TensorDef(T1, S.K, S.N),
+    B=TensorDef(T2, S.K, S.M),
+    C=TensorDef(U, S.M, S.N, output=True),
+    cast=TypeFnAttrDef(default=TypeFn.cast_signed),
+):
+    """Performs a matrix multiplication of two 2D inputs with lhs operand
+    transposed.
 
-  Numeric casting is performed on the operands to the inner multiply, promoting
-  them to the same data type as the accumulator/output.
-  """
-  domain(D.m, D.n, D.k)
-  implements(ContractionOpInterface)
-  C[D.m, D.n] += cast(U, A[D.k, D.m]) * cast(U, B[D.k, D.n])
+    Numeric casting is performed on the operands to the inner multiply, promoting
+    them to the same data type as the accumulator/output.
+    """
+    domain(D.m, D.n, D.k)
+    implements(ContractionOpInterface)
+    C[D.m, D.n] += cast(U, A[D.k, D.m]) * cast(U, B[D.k, D.n])
 
 
 @linalg_structured_op
-def matmul_transpose_b(A=TensorDef(T1, S.M, S.K),
-                       B=TensorDef(T2, S.N, S.K),
-                       C=TensorDef(U, S.M, S.N, output=True),
-                       cast=TypeFnAttrDef(default=TypeFn.cast_signed)):
-  """Performs a matrix multiplication of two 2D inputs with rhs operand
-  transposed.
+def matmul_transpose_b(
+    A=TensorDef(T1, S.M, S.K),
+    B=TensorDef(T2, S.N, S.K),
+    C=TensorDef(U, S.M, S.N, output=True),
+    cast=TypeFnAttrDef(default=TypeFn.cast_signed),
+):
+    """Performs a matrix multiplication of two 2D inputs with rhs operand
+    transposed.
 
-  Numeric casting is performed on the operands to the inner multiply, promoting
-  them to the same data type as the accumulator/output.
-  """
-  domain(D.m, D.n, D.k)
-  implements(ContractionOpInterface)
-  C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.n, D.k])
+    Numeric casting is performed on the operands to the inner multiply, promoting
+    them to the same data type as the accumulator/output.
+    """
+    domain(D.m, D.n, D.k)
+    implements(ContractionOpInterface)
+    C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.n, D.k])
 
 
 @linalg_structured_op
@@ -390,36 +394,41 @@ def batch_matmul(
 
 
 @linalg_structured_op
-def batch_matmul_transpose_a(A=TensorDef(T1, Batch, S.K, S.M),
-                             B=TensorDef(T2, Batch, S.K, S.N),
-                             C=TensorDef(U, Batch, S.M, S.N, output=True)):
-  """Performs a batched matrix multiplication of two 3D inputs where lhs operand
-  has its non-batch dimensions transposed.
+def batch_matmul_transpose_a(
+    A=TensorDef(T1, Batch, S.K, S.M),
+    B=TensorDef(T2, Batch, S.K, S.N),
+    C=TensorDef(U, Batch, S.M, S.N, output=True),
+):
+    """Performs a batched matrix multiplication of two 3D inputs where lhs operand
+    has its non-batch dimensions transposed.
 
-  Numeric casting is performed on the operands to the inner multiply, promoting
-  them to the same data type as the accumulator/output.
-  """
-  domain(D.b, D.m, D.n, D.k)
-  implements(ContractionOpInterface)
-  C[D.b, D.m, D.n] += TypeFn.cast_signed(U, A[D.b, D.k, D.m]) \
-      * TypeFn.cast_signed(U, B[D.b, D.k, D.n])
+    Numeric casting is performed on the operands to the inner multiply, promoting
+    them to the same data type as the accumulator/output.
+    """
+    domain(D.b, D.m, D.n, D.k)
+    implements(ContractionOpInterface)
+    C[D.b, D.m, D.n] += TypeFn.cast_signed(U, A[D.b, D.k, D.m]) * TypeFn.cast_signed(
+        U, B[D.b, D.k, D.n]
+    )
 
 
 @linalg_structured_op
-def batch_matmul_transpose_b(A=TensorDef(T1, Batch, S.M, S.K),
-                             B=TensorDef(T2, Batch, S.N, S.K),
-                             C=TensorDef(U, Batch, S.M, S.N, output=True)):
-  """Performs a batched matrix multiplication of two 3D inputs where rhs operand
-  has its non-batch dimensions transposed.
+def batch_matmul_transpose_b(
+    A=TensorDef(T1, Batch, S.M, S.K),
+    B=TensorDef(T2, Batch, S.N, S.K),
+    C=TensorDef(U, Batch, S.M, S.N, output=True),
+):
+    """Performs a batched matrix multiplication of two 3D inputs where rhs operand
+    has its non-batch dimensions transposed.
 
-  Numeric casting is performed on the operands to the inner multiply, promoting
-  them to the same data type as the accumulator/output.
-  """
-  domain(D.b, D.m, D.n, D.k)
-  implements(ContractionOpInterface)
-  C[D.b, D.m,
-    D.n] += TypeFn.cast_signed(U, A[D.b, D.m, D.k]) * TypeFn.cast_signed(
-        U, B[D.b, D.n, D.k])
+    Numeric casting is performed on the operands to the inner multiply, promoting
+    them to the same data type as the accumulator/output.
+    """
+    domain(D.b, D.m, D.n, D.k)
+    implements(ContractionOpInterface)
+    C[D.b, D.m, D.n] += TypeFn.cast_signed(U, A[D.b, D.m, D.k]) * TypeFn.cast_signed(
+        U, B[D.b, D.n, D.k]
+    )
 
 
 @linalg_structured_op
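
These OpDSL bodies are only reindented; the computation is unchanged. For reference, the contraction each variant expresses matches the following NumPy semantics (an equivalence sketch that ignores the `cast`/`TypeFn.cast_signed` type promotion):

import numpy as np

# NumPy reference for the four transposed-variant contractions defined above.
# A and B are the inputs; C accumulates, matching the OpDSL index expressions.

def matmul_transpose_a_ref(A, B, C):        # A: (K, M), B: (K, N), C: (M, N)
    C += np.einsum("km,kn->mn", A, B)

def matmul_transpose_b_ref(A, B, C):        # A: (M, K), B: (N, K), C: (M, N)
    C += np.einsum("mk,nk->mn", A, B)

def batch_matmul_transpose_a_ref(A, B, C):  # A: (batch, K, M), B: (batch, K, N), C: (batch, M, N)
    C += np.einsum("bkm,bkn->bmn", A, B)

def batch_matmul_transpose_b_ref(A, B, C):  # A: (batch, M, K), B: (batch, N, K), C: (batch, M, N)
    C += np.einsum("bmk,bnk->bmn", A, B)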

mlir/python/mlir/dialects/python_test.py

Lines changed: 6 additions & 1 deletion
@@ -3,7 +3,12 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
 from ._python_test_ops_gen import *
-from .._mlir_libs._mlirPythonTest import TestAttr, TestType, TestTensorValue, TestIntegerRankedTensorType
+from .._mlir_libs._mlirPythonTest import (
+    TestAttr,
+    TestType,
+    TestTensorValue,
+    TestIntegerRankedTensorType,
+)
 
 
 def register_python_test_dialect(context, load=True):

mlir/python/mlir/runtime/np_to_memref.py

Lines changed: 5 additions & 3 deletions
@@ -114,6 +114,7 @@ def get_unranked_memref_descriptor(nparray):
     d.descriptor = ctypes.cast(ctypes.pointer(x), ctypes.c_void_p)
     return d
 
+
 def move_aligned_ptr_by_offset(aligned_ptr, offset):
     """Moves the supplied ctypes pointer ahead by `offset` elements."""
     aligned_addr = ctypes.addressof(aligned_ptr.contents)
@@ -122,6 +123,7 @@ def move_aligned_ptr_by_offset(aligned_ptr, offset):
     content_ptr = ctypes.cast(aligned_addr + shift, type(aligned_ptr))
     return content_ptr
 
+
 def unranked_memref_to_numpy(unranked_memref, np_dtype):
     """Converts unranked memrefs to numpy arrays."""
     ctp = as_ctype(np_dtype)
@@ -139,10 +141,10 @@ def unranked_memref_to_numpy(unranked_memref, np_dtype):
 
 def ranked_memref_to_numpy(ranked_memref):
     """Converts ranked memrefs to numpy arrays."""
-    content_ptr = move_aligned_ptr_by_offset(ranked_memref[0].aligned, ranked_memref[0].offset)
-    np_arr = np.ctypeslib.as_array(
-        content_ptr, shape=ranked_memref[0].shape
+    content_ptr = move_aligned_ptr_by_offset(
+        ranked_memref[0].aligned, ranked_memref[0].offset
     )
+    np_arr = np.ctypeslib.as_array(content_ptr, shape=ranked_memref[0].shape)
     strided_arr = np.lib.stride_tricks.as_strided(
         np_arr,
         np.ctypeslib.as_array(ranked_memref[0].shape),
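
The `ranked_memref_to_numpy` reflow above is behavior-preserving. A quick way to sanity-check it is to round-trip an array through a descriptor; this usage sketch assumes the in-tree helpers `get_ranked_memref_descriptor` (takes a NumPy array, returns a ctypes descriptor) and `ranked_memref_to_numpy` (takes a ctypes pointer to such a descriptor) keep their current signatures:

import ctypes
import numpy as np
from mlir.runtime.np_to_memref import (
    get_ranked_memref_descriptor,
    ranked_memref_to_numpy,
)

arr = np.arange(12, dtype=np.float32).reshape(3, 4)
desc = get_ranked_memref_descriptor(arr)             # ctypes memref descriptor viewing `arr`
back = ranked_memref_to_numpy(ctypes.pointer(desc))  # applies the offset/strides logic above
assert np.array_equal(arr, back)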

mlir/tools/mlir-tblgen/OpPythonBindingGen.cpp

Lines changed: 0 additions & 2 deletions
@@ -847,7 +847,6 @@ populateBuilderRegions(const Operator &op,
 /// rebuild anew).
 static llvm::SmallVector<std::string> emitDefaultOpBuilder(const Operator &op,
                                                            raw_ostream &os) {
-  // If we are asked to skip default builders, comply.
   llvm::SmallVector<std::string> builderArgs;
   llvm::SmallVector<std::string> builderLines;
   llvm::SmallVector<std::string> operandArgNames;
@@ -980,7 +979,6 @@ static void emitRegionAccessors(const Operator &op, raw_ostream &os) {
 static void emitValueBuilder(const Operator &op,
                              llvm::SmallVector<std::string> functionArgs,
                              raw_ostream &os) {
-  auto name = sanitizeName(op.getOperationName());
   // Params with (possibly) default args.
 auto valueBuilderParams =
       llvm::map_range(functionArgs, [](const std::string &argAndMaybeDefault) {
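
Both deletions drop dead code (a stale comment and an unused `name` variable); the generated Python output is unchanged. For orientation, the value builder emitted by `emitValueBuilder` has roughly this shape — an illustrative sketch only, not the literal tblgen output, with the actual helper calls and naming in the generated dialect modules differing:

# Rough shape of a generated value builder (illustrative, not literal tblgen output):
# a snake_case module-level function that constructs the op and returns its result(s).
def addf(lhs, rhs, *, loc=None, ip=None):
    op = AddFOp(lhs=lhs, rhs=rhs, loc=loc, ip=ip)
    return op.result if len(op.results) == 1 else op.results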
