entry_point (string, 1–65 chars) | original_triton_code (string, 4.5k–619k chars) | python_code (string, 208–60.9k chars) | triton_code (string, 1.15k–275k chars) | repo_name (string, 7–115 chars) | module_name (string, 1–65 chars) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (sequence, 1–6 entries) | stars (int64, 0–19.8k) | sha (string, 40 chars) | repo_link (string, 72–180 chars) | pytorch_code (string, 200–4.05k chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
FourierConv1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/n2/cn2nv44v22b5rijinsmwpconjpikjgpfahzjcn4fpd4fjk3kdnhg.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 4], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
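    # x0: position in the padded last dim (0..7); x1: flattened (batch, channel) row.
    # Positions x0 < 4 copy from the (4, 4, 4) input; x0 >= 4 are filled with 0.0.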
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dy/cdy6jvxyavfc5kodqzvnt7mpmnd4ui3yherhbzeur6mswedzijjn.py
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = (xindex // 8)
x4 = xindex % 32
x5 = xindex
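    # in_ptr0 is a float32 view of a complex64 slice of the (4, 4, 5) out_ft buffer,
    # so consecutive rows sit 10 floats apart and only the first 8 floats (4 complex
    # modes) are read; x4 indexes the flat (4, 4, 2) float view of the complex bias.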
tmp0 = tl.load(in_ptr0 + (x0 + (10*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x5), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 2), (32, 8, 2, 1))
assert_size_stride(primals_3, (4, 4, 2), (8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x, x_ft], Original ATen: [aten.constant_pad_nd, aten._fft_r2c]
buf1 = torch.ops.aten._fft_r2c.default(buf0, [2], 0, True)
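    # One-sided rfft along dim 2: the length-8 real signal yields 8 // 2 + 1 = 5 complex bins.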
buf2 = buf1
del buf1
# Topologically Sorted Source Nodes: [out_ft], Original ATen: [aten.zeros_like]
buf3 = torch.ops.aten.full.default([4, 4, 5], 0, dtype=torch.complex64, layout=torch.strided, device=device(type='cuda', index=0), pin_memory=False)
buf4 = buf3
del buf3
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.slice]
buf5 = torch.ops.aten.slice.Tensor(buf2, 2, 0, 4)
buf6 = buf5
# Topologically Sorted Source Nodes: [view_as_complex], Original ATen: [aten.view_as_complex]
buf7 = torch.ops.aten.view_as_complex.default(primals_2)
buf8 = buf7
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.unsqueeze]
buf9 = torch.ops.aten.unsqueeze.default(buf6, 3)
buf10 = buf9
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf11 = torch.ops.aten.permute.default(buf10, [0, 3, 2, 1])
buf12 = buf11
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.unsqueeze]
buf13 = torch.ops.aten.unsqueeze.default(buf8, 3)
buf14 = buf13
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf15 = torch.ops.aten.permute.default(buf14, [3, 1, 2, 0])
buf16 = buf15
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf17 = torch.ops.aten.permute.default(buf12, [2, 0, 3, 1])
buf18 = buf17
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.view]
buf19 = torch.ops.aten.reshape.default(buf18, [4, 4, 4])
buf20 = buf19
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf21 = torch.ops.aten.permute.default(buf16, [2, 3, 1, 0])
buf22 = buf21
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.view]
buf23 = torch.ops.aten.reshape.default(buf22, [4, 4, 4])
buf24 = buf23
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm]
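    # einsum('bix,iox->box') is lowered to a single batched matmul over the
    # frequency axis: (x, b, i) @ (x, i, o) -> (x, b, o) after the permutes above.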
buf25 = torch.ops.aten.bmm.default(buf20, buf24)
del buf13
del buf14
del buf15
del buf16
del buf21
del buf22
del buf23
del buf24
del buf7
del buf8
del primals_2
buf26 = buf25
del buf25
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.view]
buf27 = torch.ops.aten.reshape.default(buf26, [4, 4, 1, 4])
buf28 = buf27
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf29 = torch.ops.aten.permute.default(buf28, [1, 3, 0, 2])
buf30 = buf29
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.view]
buf31 = torch.ops.aten.reshape.default(buf30, [4, 4, 4])
buf32 = buf31
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.slice]
buf33 = torch.ops.aten.slice.Tensor(buf4, 2, 0, 4)
buf34 = buf33
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.copy]
buf35 = torch.ops.aten.copy.default(buf34, buf32)
del buf26
del buf27
del buf28
del buf29
del buf30
del buf31
del buf32
buf36 = buf35
del buf35
# Topologically Sorted Source Nodes: [], Original ATen: []
buf37 = torch.ops.aten.slice_scatter.default(buf4, buf36, 2, 0, 4)
del buf36
buf38 = buf37
del buf37
# Topologically Sorted Source Nodes: [view_as_complex_1], Original ATen: [aten.view_as_complex]
buf39 = torch.ops.aten.view_as_complex.default(primals_3)
buf40 = buf39
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.slice]
buf41 = torch.ops.aten.slice.Tensor(buf38, 2, 0, 4)
buf42 = buf41
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
buf43 = torch.ops.aten.view.dtype(buf42, torch.float32)
buf44 = buf43
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
buf45 = torch.ops.aten.view.dtype(buf40, torch.float32)
buf46 = buf45
buf47 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf44, buf46, buf47, 128, grid=grid(128), stream=stream0)
del buf39
del buf40
del buf41
del buf42
del buf43
del buf44
del buf45
del buf46
del primals_3
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
buf48 = torch.ops.aten.view.dtype(reinterpret_tensor(buf47, (4, 4, 8), (32, 8, 1), 0), torch.complex64)
buf49 = buf48
# Topologically Sorted Source Nodes: [], Original ATen: []
buf50 = torch.ops.aten.slice_scatter.default(buf38, buf49, 2, 0, 4)
del buf38
del buf47
del buf48
del buf49
buf51 = buf50
del buf50
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.slice]
buf52 = torch.ops.aten.slice.Tensor(buf51, 2, 0, 4)
buf53 = buf52
# Topologically Sorted Source Nodes: [], Original ATen: []
buf54 = torch.ops.aten.slice_scatter.default(buf51, buf53, 2, 0, 4)
del buf51
del buf52
del buf53
buf55 = buf54
del buf54
# Topologically Sorted Source Nodes: [out], Original ATen: [aten._fft_c2r]
buf56 = torch.ops.aten._fft_c2r.default(buf55, [2], 2, 8)
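    # Inverse one-sided FFT back to length n=8; the reinterpret_tensor in the
    # return below realizes the out[..., :-padding] crop to length 4 as a view.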
del buf55
buf57 = buf56
del buf56
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
buf58 = torch.ops.aten.permute.default(buf20, [0, 2, 1])
buf59 = buf58
# Topologically Sorted Source Nodes: [], Original ATen: [aten._conj]
buf60 = torch.ops.aten._conj.default(buf59)
buf61 = buf60
return (reinterpret_tensor(buf57, (4, 4, 4), (32, 8, 1), 0), buf4, buf61, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 2), (32, 8, 2, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class FourierConv1d(torch.nn.Module):
def __init__(self, in_channels, out_channels, size, bias=True, periodic=False):
super(FourierConv1d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if not periodic:
self.size = size
else:
self.size = size // 2
self.weights = torch.nn.Parameter(torch.view_as_real(1 / (in_channels * out_channels) * torch.rand(in_channels, out_channels, self.size, dtype=torch.cfloat)))
self.biases = torch.nn.Parameter(torch.view_as_real(1 / out_channels * torch.rand(out_channels, self.size, dtype=torch.cfloat)))
self.bias = bias
self.periodic = periodic
def forward(self, x):
if not self.periodic:
padding = self.size
x = torch.nn.functional.pad(x, [0, padding])
x_ft = torch.fft.rfft(x)
out_ft = torch.zeros_like(x_ft)
out_ft[:, :, :self.size] = torch.einsum('bix,iox->box', x_ft[:, :, :self.size], torch.view_as_complex(self.weights))
if self.bias:
out_ft[:, :, :self.size] += torch.view_as_complex(self.biases)
out = torch.fft.irfft(out_ft, n=x.size(-1))
if not self.periodic:
out = out[..., :-padding]
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'size': 4}]
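A minimal smoke test for the module above (hypothetical, not part of the dataset row), assuming only the FourierConv1d class and the shapes from get_inputs()/get_init_inputs():
import torch

model = FourierConv1d(in_channels=4, out_channels=4, size=4)
x = torch.rand(4, 4, 4)  # (batch, in_channels, length), as in get_inputs()
out = model(x)
# Non-periodic path: pad the length by `size` (4 -> 8), rfft, multiply the
# first `size` modes by the complex weights via einsum('bix,iox->box'), add
# the complex bias, irfft back to length 8, then crop the padding off.
assert out.shape == (4, 4, 4)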
| import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = xindex // 8
x4 = xindex % 32
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 10 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x5, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 2), (32, 8, 2, 1))
assert_size_stride(primals_3, (4, 4, 2), (8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(128)](primals_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = torch.ops.aten._fft_r2c.default(buf0, [2], 0, True)
buf2 = buf1
del buf1
buf3 = torch.ops.aten.full.default([4, 4, 5], 0, dtype=torch.complex64, layout=torch.strided, device=device(type='cuda', index=0), pin_memory=False)
buf4 = buf3
del buf3
buf5 = torch.ops.aten.slice.Tensor(buf2, 2, 0, 4)
buf6 = buf5
buf7 = torch.ops.aten.view_as_complex.default(primals_2)
buf8 = buf7
buf9 = torch.ops.aten.unsqueeze.default(buf6, 3)
buf10 = buf9
buf11 = torch.ops.aten.permute.default(buf10, [0, 3, 2, 1])
buf12 = buf11
buf13 = torch.ops.aten.unsqueeze.default(buf8, 3)
buf14 = buf13
buf15 = torch.ops.aten.permute.default(buf14, [3, 1, 2, 0])
buf16 = buf15
buf17 = torch.ops.aten.permute.default(buf12, [2, 0, 3, 1])
buf18 = buf17
buf19 = torch.ops.aten.reshape.default(buf18, [4, 4, 4])
buf20 = buf19
buf21 = torch.ops.aten.permute.default(buf16, [2, 3, 1, 0])
buf22 = buf21
buf23 = torch.ops.aten.reshape.default(buf22, [4, 4, 4])
buf24 = buf23
buf25 = torch.ops.aten.bmm.default(buf20, buf24)
del buf13
del buf14
del buf15
del buf16
del buf21
del buf22
del buf23
del buf24
del buf7
del buf8
del primals_2
buf26 = buf25
del buf25
buf27 = torch.ops.aten.reshape.default(buf26, [4, 4, 1, 4])
buf28 = buf27
buf29 = torch.ops.aten.permute.default(buf28, [1, 3, 0, 2])
buf30 = buf29
buf31 = torch.ops.aten.reshape.default(buf30, [4, 4, 4])
buf32 = buf31
buf33 = torch.ops.aten.slice.Tensor(buf4, 2, 0, 4)
buf34 = buf33
buf35 = torch.ops.aten.copy.default(buf34, buf32)
del buf26
del buf27
del buf28
del buf29
del buf30
del buf31
del buf32
buf36 = buf35
del buf35
buf37 = torch.ops.aten.slice_scatter.default(buf4, buf36, 2, 0, 4)
del buf36
buf38 = buf37
del buf37
buf39 = torch.ops.aten.view_as_complex.default(primals_3)
buf40 = buf39
buf41 = torch.ops.aten.slice.Tensor(buf38, 2, 0, 4)
buf42 = buf41
buf43 = torch.ops.aten.view.dtype(buf42, torch.float32)
buf44 = buf43
buf45 = torch.ops.aten.view.dtype(buf40, torch.float32)
buf46 = buf45
buf47 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf0
triton_poi_fused_add_1[grid(128)](buf44, buf46, buf47, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del buf39
del buf40
del buf41
del buf42
del buf43
del buf44
del buf45
del buf46
del primals_3
buf48 = torch.ops.aten.view.dtype(reinterpret_tensor(buf47, (4, 4, 8), (32, 8, 1), 0), torch.complex64)
buf49 = buf48
buf50 = torch.ops.aten.slice_scatter.default(buf38, buf49, 2, 0, 4)
del buf38
del buf47
del buf48
del buf49
buf51 = buf50
del buf50
buf52 = torch.ops.aten.slice.Tensor(buf51, 2, 0, 4)
buf53 = buf52
buf54 = torch.ops.aten.slice_scatter.default(buf51, buf53, 2, 0, 4)
del buf51
del buf52
del buf53
buf55 = buf54
del buf54
buf56 = torch.ops.aten._fft_c2r.default(buf55, [2], 2, 8)
del buf55
buf57 = buf56
del buf56
buf58 = torch.ops.aten.permute.default(buf20, [0, 2, 1])
buf59 = buf58
buf60 = torch.ops.aten._conj.default(buf59)
buf61 = buf60
return reinterpret_tensor(buf57, (4, 4, 4), (32, 8, 1), 0), buf4, buf61
class FourierConv1dNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, size, bias=True, periodic=False):
super(FourierConv1dNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if not periodic:
self.size = size
else:
self.size = size // 2
self.weights = torch.nn.Parameter(torch.view_as_real(1 / (in_channels * out_channels) * torch.rand(in_channels, out_channels, self.size, dtype=torch.cfloat)))
self.biases = torch.nn.Parameter(torch.view_as_real(1 / out_channels * torch.rand(out_channels, self.size, dtype=torch.cfloat)))
self.bias = bias
self.periodic = periodic
def forward(self, input_0):
primals_2 = self.weights
primals_3 = self.biases
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| julian-parker/DAFX22_FNO | FourierConv1d | false | 3,782 | ["MIT"] | 0 | 72f30144317a3f8ba8ea23ecf9a0333c81fc87db | https://github.com/julian-parker/DAFX22_FNO/tree/72f30144317a3f8ba8ea23ecf9a0333c81fc87db | import torch
class Model(torch.nn.Module):
def __init__(self, in_channels, out_channels, size, bias=True, periodic=False):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if not periodic:
self.size = size
else:
self.size = size // 2
self.weights = torch.nn.Parameter(torch.view_as_real(1 / (in_channels * out_channels) * torch.rand(in_channels, out_channels, self.size, dtype=torch.cfloat)))
self.biases = torch.nn.Parameter(torch.view_as_real(1 / out_channels * torch.rand(out_channels, self.size, dtype=torch.cfloat)))
self.bias = bias
self.periodic = periodic
def forward(self, x):
if not self.periodic:
padding = self.size
x = torch.nn.functional.pad(x, [0, padding])
x_ft = torch.fft.rfft(x)
out_ft = torch.zeros_like(x_ft)
out_ft[:, :, :self.size] = torch.einsum('bix,iox->box', x_ft[:, :, :self.size], torch.view_as_complex(self.weights))
if self.bias:
out_ft[:, :, :self.size] += torch.view_as_complex(self.biases)
out = torch.fft.irfft(out_ft, n=x.size(-1))
if not self.periodic:
out = out[..., :-padding]
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Duel_QNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py
# Topologically Sorted Source Nodes: [state], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# state => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
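    # Fused bias add + ReLU over 64 output features (x0 selects the bias element);
    # the <= 0 mask stored to out_ptr0 is reused by autograd's threshold_backward.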
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zq/czq36dtke4tzajhnd2fjd4pikm7iwib36lx3mwn6v6q3npjg6tew.py
# Topologically Sorted Source Nodes: [mean, sub, state_2], Original ATen: [aten.mean, aten.sub, aten.add]
# Source node to ATen node mapping:
# mean => mean
# state_2 => add
# sub => sub
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%view_7,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_7, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %sub), kwargs = {})
triton_per_fused_add_mean_sub_1 = async_compile.triton('triton_per_fused_add_mean_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 4
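    # A single program reduces all 256 advantage values to a global mean (this
    # matches torch.mean(advantage)), adds the value-head bias, then writes
    # value + (advantage - mean) back in place.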
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_out_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp6 = tmp4 + tmp5
tmp7 = 256.0
tmp8 = tmp3 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp6 + tmp9
tl.store(in_out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 64), (64, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
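# in_fc1: the (4, 4, 4, 4) input viewed as (64, 4) times the transposed weight;
# the bias add + ReLU are fused into the Triton kernel launched below.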
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [state], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 4096, grid=grid(4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [state_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf8, 4096, grid=grid(4096), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), out=buf4)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [advantage], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf5)
del primals_9
buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [mean, sub, state_2], Original ATen: [aten.mean, aten.sub, aten.add]
triton_per_fused_add_mean_sub_1.run(buf7, buf5, primals_7, 1, 256, grid=grid(1), stream=stream0)
del buf5
del primals_7
return (buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 64), (64, 1), 0), primals_8, primals_6, buf8, primals_4, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Duel_QNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(Duel_QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.in_fc1 = nn.Linear(state_size, 64)
self.hidden = nn.Linear(64, 64)
self.value = nn.Linear(64, action_size)
self.advantage = nn.Linear(64, action_size)
self.dropout = nn.Dropout(0.4)
def forward(self, state):
"""Build a network that maps state -> action values."""
state = F.relu(self.in_fc1(state))
state = F.relu(self.hidden(state))
value = self.value(state)
advantage = self.advantage(state)
state = value + (advantage - torch.mean(advantage))
return state
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
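A hedged usage sketch for the network above (assumes only the Duel_QNetwork class). Note that torch.mean(advantage) reduces over all elements, batch included (the 256-element sum in triton_per_fused_add_mean_sub_1), rather than the per-state mean over actions used in the original dueling-DQN formulation:
import torch

net = Duel_QNetwork(state_size=4, action_size=4, seed=4)
state = torch.rand(4, 4, 4, 4)  # as in get_inputs()
q = net(state)  # Q(s, a) = V(s) + (A(s, a) - mean(A))
assert q.shape == (4, 4, 4, 4)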
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_per_fused_add_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_out_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp6 = tmp4 + tmp5
tmp7 = 256.0
tmp8 = tmp3 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp6 + tmp9
tl.store(in_out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp10, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 64), (64, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf9, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3,
primals_5, buf8, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), out=buf4)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf5)
del primals_9
buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_per_fused_add_mean_sub_1[grid(1)](buf7, buf5, primals_7, 1,
256, num_warps=2, num_stages=1)
del buf5
del primals_7
return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 64), (64, 1), 0), primals_8, primals_6, buf8, primals_4, buf9
class Duel_QNetworkNew(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(Duel_QNetworkNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.in_fc1 = nn.Linear(state_size, 64)
self.hidden = nn.Linear(64, 64)
self.value = nn.Linear(64, action_size)
self.advantage = nn.Linear(64, action_size)
self.dropout = nn.Dropout(0.4)
def forward(self, input_0):
primals_1 = self.in_fc1.weight
primals_2 = self.in_fc1.bias
primals_4 = self.hidden.weight
primals_5 = self.hidden.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_8 = self.advantage.weight
primals_9 = self.advantage.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| jsztompka/DuelDQN | Duel_QNetwork | false | 3,783 | ["MIT"] | 0 | 3b1234425b66034ef233ac988305dc13ffbf7ace | https://github.com/jsztompka/DuelDQN/tree/3b1234425b66034ef233ac988305dc13ffbf7ace | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.in_fc1 = nn.Linear(state_size, 64)
self.hidden = nn.Linear(64, 64)
self.value = nn.Linear(64, action_size)
self.advantage = nn.Linear(64, action_size)
self.dropout = nn.Dropout(0.4)
def forward(self, state):
"""Build a network that maps state -> action values."""
state = F.relu(self.in_fc1(state))
state = F.relu(self.hidden(state))
value = self.value(state)
advantage = self.advantage(state)
state = value + (advantage - torch.mean(advantage))
return state
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
VAE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/hv/chv6pv44lsce4whqmnuaypyoh6jutk3hh6yovixavbswm7mdta3f.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
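    # Likely a layout conversion for the first encoder conv weight: a contiguous
    # (32, 3, 4, 4) tensor is repacked channels-last, out[n, hw, c] = in[n, c, hw].
    # Kernels 1-8 below apply the same permutation to the remaining conv weights
    # and activations.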
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (48*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wr/cwrbaplpfk7m6giisotqeykajo7urpubzk4y7hl6wjrhxxtwwukj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (512*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dx/cdx5ml2qpofihmmpnvabqkpaoyptwmwdx4jtjzptieewtlhrqlmf.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kv/ckvorupxanzrceis7ogps6qnxhad4srcb6zrfzpkwhenxdnsalg7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (2048*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gj/cgjk6oyn7d2k7tawn6q6nelsui2ldu54ytbdku7v7hgqzgohxqri.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xd/cxdxb7tcecdrygp7d6dxpeakmpxug2fn4gzukjyf4vazkydyidln.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zc/czcxgooldgpjdotlr54s2gygpkelgbayy4gcii54levkma7slwhu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (36*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6a/c6axhoo3iq27epmquggqxv5litp5xhw4r5byniurtrwhc3uobvjt.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (36*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (108*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
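# NOTE (illustrative, not part of the generated output): triton_poi_fused_0
# through triton_poi_fused_8 above do no arithmetic; each one only permutes a
# contiguous NCHW weight (or input) into channels-last strides so the extern
# convolution kernels below can run on channels-last operands. A minimal
# eager-mode sketch of the same transform, under that reading:
def _ref_to_channels_last(w):
    # e.g. a (1024, 128, 5, 5) weight with strides (3200, 25, 5, 1) becomes
    # strides (3200, 1, 640, 128), matching buf5 built in call() below.
    return w.contiguous(memory_format=torch.channels_last)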
# kernel path: runs/run_shard_7/inductor_cache/nn/cnnrha2fherbxf4u4ol3reswxwrhd2on7n4ktcvs6jj5lim7f4hb.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
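# NOTE (illustrative sketch, an assumption rather than generated code): the
# fused kernel above is the epilogue of conv1 -- it adds the per-channel bias
# and applies ReLU in place; `xindex % 32` selects the channel because the
# activation is laid out channels-last. An eager equivalent:
def _ref_bias_relu(y, bias):
    # y: (N, C, H, W) conv output without bias; bias: (C,)
    return torch.relu(y + bias.view(1, -1, 1, 1))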
# kernel path: runs/run_shard_7/inductor_cache/e3/ce32guj5uo4yfbgfyav7w7f5l7pqh2dwdpgu5s7bvggocb654zst.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/iy/ciyceulo5ucqtwtx5ngamvwhtb6klh6npn5n2vyna4e3z3wvxoh7.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qo/cqoyhizmed2eqczsctwwli6z6t6s7lmxxawtcrmzjclvfd2clc7v.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_12 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = (yindex // 256)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (1024*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (256*x2) + (1024*y1)), tmp6, xmask)
''', device_str='cuda')
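# NOTE (illustrative, assumed reference): kernel 12 fuses the conv4 bias add
# and ReLU while also saving the `<= 0` mask that aten.threshold_backward will
# use to zero gradients, and it repacks the activation into contiguous layout
# (buf16) so it can be flattened for the fully connected heads. Sketch:
def _ref_relu_with_mask(y, bias):
    out = torch.relu(y + bias.view(1, -1, 1, 1))
    return out, out <= 0  # mask consumed by the ReLU backward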
# kernel path: runs/run_shard_7/inductor_cache/z6/cz6agmgbvpbegb2vpnyhdf5hbitnlkigqr3264t2oidwyhq5zbt5.py
# Topologically Sorted Source Nodes: [sigma, mul, z], Original ATen: [aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# sigma => exp
# z => add
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%addmm_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %addmm), kwargs = {})
triton_poi_fused_add_exp_mul_13 = async_compile.triton('triton_poi_fused_add_exp_mul_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
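# NOTE (illustrative sketch): kernel 13 is the VAE reparameterization trick,
# z = eps * exp(logsigma) + mu, computed elementwise over the (4, 4) latents.
def _ref_reparameterize(eps, logsigma, mu):
    return eps * torch.exp(logsigma) + mu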
# kernel path: runs/run_shard_7/inductor_cache/yo/cyo7mrbkbxytlgwjzo5zy6seg3g4hmpc4npfybdxetljslnyaoyn.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_relu_threshold_backward_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
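# NOTE: kernel 14 mirrors kernel 12 for the decoder's fc1: the matmul result
# in buf23 gets its bias added and ReLU applied in place, while the `<= 0`
# mask is written to buf32 for the backward pass.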
# kernel path: runs/run_shard_7/inductor_cache/wb/cwbu4ghjbsppchfi4n3xvx6dxq2prkxpnirbyjw4k26veiz3bwbp.py
# Topologically Sorted Source Nodes: [conv_transpose2d, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d => convolution_4
# x_7 => relu_5
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_1, %primals_16, %primals_17, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_15 = async_compile.triton('triton_poi_fused_convolution_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ki/ckiytdtk6bbkxiexegri7l2e2lsqu73pxroxeibcovuhiymcvnh2.py
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_8], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d_1 => convolution_5
# x_8 => relu_6
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_18, %primals_19, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ww/cww3gdzm5h4lqdcqon5gadse23acwjsz5ic2xribo6zzsegnijfr.py
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d_2 => convolution_6
# x_9 => relu_7
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_6, %primals_20, %primals_21, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_relu_17 = async_compile.triton('triton_poi_fused_convolution_relu_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/c6/cc6geroiohteygkwlczzd6tm5pg22vftmfs6p4mdgzy4booeq6c2.py
# Topologically Sorted Source Nodes: [conv_transpose2d_3, x_10], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv_transpose2d_3 => convolution_7
# x_10 => sigmoid
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_7, %primals_22, %primals_23, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_sigmoid_18 = async_compile.triton('triton_poi_fused_convolution_sigmoid_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_18(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 3
y1 = (yindex // 3)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (3*x2) + (12288*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp3, ymask)
''', device_str='cuda')
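# NOTE (illustrative sketch): the final kernel adds the deconv4 bias, applies
# sigmoid, and simultaneously repacks the channels-last result back into a
# contiguous NCHW output tensor. An eager equivalent:
def _ref_bias_sigmoid_contig(y, bias):
    return torch.sigmoid(y + bias.view(1, -1, 1, 1)).contiguous()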
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (4, 1024), (1024, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 1024), (1024, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (1024, 4), (4, 1))
assert_size_stride(primals_15, (1024, ), (1, ))
assert_size_stride(primals_16, (1024, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_17, (128, ), (1, ))
assert_size_stride(primals_18, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_19, (64, ), (1, ))
assert_size_stride(primals_20, (64, 32, 6, 6), (1152, 36, 6, 1))
assert_size_stride(primals_21, (32, ), (1, ))
assert_size_stride(primals_22, (32, 3, 6, 6), (108, 36, 6, 1))
assert_size_stride(primals_23, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 4, 4), (48, 1, 12, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 96, 16, grid=grid(96, 16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 2048, 16, grid=grid(2048, 16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 16, grid=grid(8192, 16), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 32768, 16, grid=grid(32768, 16), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_16, buf5, 131072, 25, grid=grid(131072, 25), stream=stream0)
del primals_16
buf6 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_18, buf6, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_18
buf7 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_7.run(primals_20, buf7, 2048, 36, grid=grid(2048, 36), stream=stream0)
del primals_20
buf8 = empty_strided_cuda((32, 3, 6, 6), (108, 1, 18, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_22, buf8, 96, 36, grid=grid(96, 36), stream=stream0)
del primals_22
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 31, 31), (30752, 1, 992, 32))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf10, primals_2, 123008, grid=grid(123008), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 14, 14), (12544, 1, 896, 64))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf12, primals_5, 50176, grid=grid(50176), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 6, 6), (4608, 1, 768, 128))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf14, primals_7, 18432, grid=grid(18432), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 256, 2, 2), (1024, 1, 512, 256))
buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.float32)
buf33 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_12.run(buf15, primals_9, buf16, buf33, 1024, 4, grid=grid(1024, 4), stream=stream0)
del primals_9
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf16, (4, 1024), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf17)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logsigma], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf16, (4, 1024), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf18)
del primals_13
# Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like]
buf19 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf20 = buf19
del buf19
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigma, mul, z], Original ATen: [aten.exp, aten.mul, aten.add]
triton_poi_fused_add_exp_mul_13.run(buf20, buf18, buf17, buf21, 16, grid=grid(16), stream=stream0)
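        # buf21 now holds z = eps * exp(logsigma) + mu (cf. triton_poi_fused_add_exp_mul_13)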
buf22 = reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf21, reinterpret_tensor(primals_14, (4, 1024), (1, 4), 0), out=buf22)
buf23 = buf22; del buf22 # reuse
buf32 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_14.run(buf23, primals_15, buf32, 4096, grid=grid(4096), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 0, 0), 0), buf5, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 5, 5), (3200, 1, 640, 128))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf25, primals_17, 12800, grid=grid(12800), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, buf6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 64, 13, 13), (10816, 1, 832, 64))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf27, primals_19, 43264, grid=grid(43264), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, buf7, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 32, 30, 30), (28800, 1, 960, 32))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_17.run(buf29, primals_21, 115200, grid=grid(115200), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [conv_transpose2d_3], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, buf8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 3, 64, 64), (12288, 1, 192, 3))
buf31 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d_3, x_10], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_18.run(buf30, primals_23, buf31, 12, 4096, grid=grid(12, 4096), stream=stream0)
del buf30
del primals_23
return (buf31, buf17, buf18, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf10, buf12, buf14, reinterpret_tensor(buf16, (4, 1024), (1024, 1), 0), buf18, buf20, buf21, reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 1, 1), 0), buf25, buf27, buf29, buf31, buf32, primals_14, primals_12, primals_10, buf33, )
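# NOTE (assumed reading of the generated wrapper): `call` takes the 23 VAE
# parameters followed by the input batch (primals_3) and returns the decoder
# output, mu, and logsigma first, with the remaining tensors kept as saved
# values for the backward graph.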
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1024, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((64, 32, 6, 6), (1152, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((32, 3, 6, 6), (108, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
from torch.nn import functional as F
class Encoder(nn.Module):
def __init__(self, latent_size):
super().__init__()
self.latent_size = latent_size
self.conv1 = nn.Conv2d(3, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.flatten = nn.Flatten()
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = self.flatten(x)
mu = self.fc_mu(x)
logsigma = self.fc_logsigma(x)
return mu, logsigma
class Decoder(nn.Module):
def __init__(self, latent_size):
super().__init__()
self.latent_size = latent_size
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = x.view(x.size(0), x.size(1), 1, 1)
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = F.relu(self.deconv3(x))
x = torch.sigmoid(self.deconv4(x))
return x
class VAE(nn.Module):
def __init__(self, latent_size):
super().__init__()
self.encoder = Encoder(latent_size)
self.decoder = Decoder(latent_size)
def encode(self, x, reparameterize=False):
mu, logsigma = self.encoder(x)
if reparameterize:
sigma = logsigma.exp()
eps = torch.randn_like(sigma)
z = eps.mul(sigma).add_(mu)
return z
return mu
def forward(self, x):
mu, logsigma = self.encoder(x)
sigma = logsigma.exp()
eps = torch.randn_like(sigma)
z = eps.mul(sigma).add_(mu)
recon_x = self.decoder(z)
return recon_x, mu, logsigma
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'latent_size': 4}]
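# Minimal eager-mode usage sketch (an assumption, not part of the captured
# source): the helpers above configure a latent size of 4 and a random batch.
def _ref_run_vae():
    model = VAE(*get_init_inputs()[0], **get_init_inputs()[1])
    recon_x, mu, logsigma = model(*get_inputs())
    return recon_x.shape, mu.shape, logsigma.shape  # (4,3,64,64), (4,4), (4,4)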
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 108 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
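# Fused bias-add + ReLU over the deconv3 activation (4x32x30x30 = 115200 elements).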
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
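# Final decoder step: adds the deconv4 bias, applies sigmoid, and converts
# the 4x3x64x64 output from channels-last back to contiguous NCHW layout.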
@triton.jit
def triton_poi_fused_convolution_sigmoid_18(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12288 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask)
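# Driver: reorders the weights into channels-last layout, runs the encoder
# convolutions, samples z with the fused reparameterization kernel, then runs
# the decoder deconvolutions, returning the reconstruction plus the buffers
# saved for autograd.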
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (4, 1024), (1024, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 1024), (1024, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (1024, 4), (4, 1))
assert_size_stride(primals_15, (1024,), (1,))
assert_size_stride(primals_16, (1024, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64, 32, 6, 6), (1152, 36, 6, 1))
assert_size_stride(primals_21, (32,), (1,))
assert_size_stride(primals_22, (32, 3, 6, 6), (108, 36, 6, 1))
assert_size_stride(primals_23, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 4, 4), (48, 1, 12, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(96, 16)](primals_1, buf0, 96, 16, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_5[grid(131072, 25)](primals_16, buf5, 131072, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_16
buf6 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_6[grid(8192, 25)](primals_18, buf6, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_18
buf7 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch
.float32)
triton_poi_fused_7[grid(2048, 36)](primals_20, buf7, 2048, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_20
buf8 = empty_strided_cuda((32, 3, 6, 6), (108, 1, 18, 3), torch.float32
)
triton_poi_fused_8[grid(96, 36)](primals_22, buf8, 96, 36, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_22
buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 31, 31), (30752, 1, 992, 32))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_9[grid(123008)](buf10, primals_2,
123008, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf11 = extern_kernels.convolution(buf10, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 14, 14), (12544, 1, 896, 64))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_10[grid(50176)](buf12, primals_5,
50176, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf13 = extern_kernels.convolution(buf12, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 6, 6), (4608, 1, 768, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_11[grid(18432)](buf14, primals_7,
18432, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf15 = extern_kernels.convolution(buf14, buf4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 256, 2, 2), (1024, 1, 512, 256))
buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.
float32)
buf33 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_12[grid(1024, 4)](
buf15, primals_9, buf16, buf33, 1024, 4, XBLOCK=1, YBLOCK=256,
num_warps=4, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf16, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf17)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf16, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf18)
del primals_13
buf19 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf20 = buf19
del buf19
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_exp_mul_13[grid(16)](buf20, buf18, buf17,
buf21, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf22 = reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0)
del buf15
extern_kernels.mm(buf21, reinterpret_tensor(primals_14, (4, 1024),
(1, 4), 0), out=buf22)
buf23 = buf22
del buf22
buf32 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_14[grid(4096)](buf23,
primals_15, buf32, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4,
1024, 1, 1), (1024, 1, 0, 0), 0), buf5, stride=(2, 2), padding=
(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 5, 5), (3200, 1, 640, 128))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_15[grid(12800)](buf25, primals_17,
12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf26 = extern_kernels.convolution(buf25, buf6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 64, 13, 13), (10816, 1, 832, 64))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_16[grid(43264)](buf27, primals_19,
43264, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf28 = extern_kernels.convolution(buf27, buf7, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 32, 30, 30), (28800, 1, 960, 32))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_17[grid(115200)](buf29,
primals_21, 115200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf30 = extern_kernels.convolution(buf29, buf8, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 3, 64, 64), (12288, 1, 192, 3))
buf31 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_sigmoid_18[grid(12, 4096)](buf30,
primals_23, buf31, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4,
num_stages=1)
del buf30
del primals_23
return (buf31, buf17, buf18, buf0, buf1, buf2, buf3, buf4, buf5, buf6,
buf7, buf8, buf10, buf12, buf14, reinterpret_tensor(buf16, (4, 1024
), (1024, 1), 0), buf18, buf20, buf21, reinterpret_tensor(buf23, (4,
1024, 1, 1), (1024, 1, 1, 1), 0), buf25, buf27, buf29, buf31, buf32,
primals_14, primals_12, primals_10, buf33)
class Encoder(nn.Module):
def __init__(self, latent_size):
super().__init__()
self.latent_size = latent_size
self.conv1 = nn.Conv2d(3, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.flatten = nn.Flatten()
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = self.flatten(x)
mu = self.fc_mu(x)
logsigma = self.fc_logsigma(x)
return mu, logsigma
class Decoder(nn.Module):
def __init__(self, latent_size):
super().__init__()
self.latent_size = latent_size
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = x.view(x.size(0), x.size(1), 1, 1)
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = F.relu(self.deconv3(x))
x = torch.sigmoid(self.deconv4(x))
return x
class VAENew(nn.Module):
def __init__(self, latent_size):
super().__init__()
self.encoder = Encoder(latent_size)
self.decoder = Decoder(latent_size)
def encode(self, x, reparameterize=False):
mu, logsigma = self.encoder(x)
if reparameterize:
sigma = logsigma.exp()
eps = torch.randn_like(sigma)
z = eps.mul(sigma).add_(mu)
return z
return mu
def forward(self, input_0):
primals_1 = self.encoder.conv1.weight
primals_2 = self.encoder.conv1.bias
primals_4 = self.encoder.conv2.weight
primals_5 = self.encoder.conv2.bias
primals_6 = self.encoder.conv3.weight
primals_7 = self.encoder.conv3.bias
primals_8 = self.encoder.conv4.weight
primals_9 = self.encoder.conv4.bias
primals_10 = self.encoder.fc_mu.weight
primals_11 = self.encoder.fc_mu.bias
primals_12 = self.encoder.fc_logsigma.weight
primals_13 = self.encoder.fc_logsigma.bias
primals_14 = self.decoder.fc1.weight
primals_15 = self.decoder.fc1.bias
primals_16 = self.decoder.deconv1.weight
primals_17 = self.decoder.deconv1.bias
primals_18 = self.decoder.deconv2.weight
primals_19 = self.decoder.deconv2.bias
primals_20 = self.decoder.deconv3.weight
primals_21 = self.decoder.deconv3.bias
primals_22 = self.decoder.deconv4.weight
primals_23 = self.decoder.deconv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23])
return output[0], output[1], output[2]
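def _reparameterize_sketch():
    # Hedged sketch (not part of the original module): the sampling step the
    # fused add/exp/mul kernel above implements. z = mu + eps * sigma keeps
    # the sample differentiable with respect to mu and logsigma.
    mu = torch.zeros(4, 4)
    logsigma = torch.zeros(4, 4)
    eps = torch.randn_like(mu)
    z = eps.mul(logsigma.exp()).add_(mu)
    return z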
| jinyeom/ga-plastic-models | VAE | false | 3,784 | [
"MIT"
] | 0 | e38b245ae51c35a5f32679cc9f215463a3d58f1a | https://github.com/jinyeom/ga-plastic-models/tree/e38b245ae51c35a5f32679cc9f215463a3d58f1a | import torch
from torch import nn
from torch.nn import functional as F
class Encoder(nn.Module):
def __init__(self, latent_size):
super().__init__()
self.latent_size = latent_size
self.conv1 = nn.Conv2d(3, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.flatten = nn.Flatten()
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = self.flatten(x)
mu = self.fc_mu(x)
logsigma = self.fc_logsigma(x)
return mu, logsigma
class Decoder(nn.Module):
def __init__(self, latent_size):
super().__init__()
self.latent_size = latent_size
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, 3, 6, stride=2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = x.view(x.size(0), x.size(1), 1, 1)
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = F.relu(self.deconv3(x))
x = torch.sigmoid(self.deconv4(x))
return x
class Model(nn.Module):
def __init__(self, latent_size):
super().__init__()
self.encoder = Encoder(latent_size)
self.decoder = Decoder(latent_size)
def encode(self, x, reparameterize=False):
mu, logsigma = self.encoder(x)
if reparameterize:
sigma = logsigma.exp()
eps = torch.randn_like(sigma)
z = eps.mul(sigma).add_(mu)
return z
return mu
def forward(self, x):
mu, logsigma = self.encoder(x)
sigma = logsigma.exp()
eps = torch.randn_like(sigma)
z = eps.mul(sigma).add_(mu)
recon_x = self.decoder(z)
return recon_x, mu, logsigma
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [4]
|
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lh/clhtaboxxs526aw4bqcb7s6xoig5vzwco55tfg6waaga3ao3elgd.py
# Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm]
# Source node to ATen node mapping:
# euclidean_distance => add, pow_1, pow_2, sub, sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub, 1e-06), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused_add_norm_sub_0 = async_compile.triton('triton_poi_fused_add_norm_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + (x0), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/y6/cy6x26dxc7bdnmh5zf6kyg4onkvqgh2af46gfstluehu7emttlju.py
# Topologically Sorted Source Nodes: [sub, pow_1, mul, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add_1
# clamp => clamp_min
# loss_contrastive => mean
# mul => mul
# mul_1 => mul_1
# pow_1 => pow_3
# pow_2 => pow_4
# sub => sub_1
# sub_1 => sub_2
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %pow_3), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %pow_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %pow_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {})
triton_per_fused_add_clamp_mean_mul_pow_rsub_1 = async_compile.triton('triton_per_fused_add_clamp_mean_mul_pow_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_mean_mul_pow_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp3 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 4.0
tmp7 = tmp6 - tmp3
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp5 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm]
stream0 = get_raw_stream(0)
triton_poi_fused_add_norm_sub_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, mul, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.mean]
triton_per_fused_add_clamp_mean_mul_pow_rsub_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg2_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision import transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
        euclidean_distance = F.pairwise_distance(output1, output2,
            keepdim=True)
loss_contrastive = torch.mean((1 - label) * torch.pow(
euclidean_distance, 2) + label * torch.pow(torch.clamp(self.
margin - euclidean_distance, min=0.0), 2))
return loss_contrastive
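def _contrastive_loss_sketch():
    # Hedged usage sketch (hypothetical tensors, not from the original repo):
    # label 0 marks a similar pair (its distance is penalized), label 1 a
    # dissimilar pair (pushed out past the margin).
    criterion = ContrastiveLoss(margin=4)
    out1, out2 = torch.rand(8, 16), torch.rand(8, 16)
    label = torch.randint(0, 2, (8, 1)).float()
    return criterion(out1, out2, label)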
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torchvision import transforms as transforms
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
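# Pairwise Euclidean distance over the last dimension (size 4): unrolled
# sqrt(sum((a - b + 1e-06)**2)), matching F.pairwise_distance's default eps.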
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + x0, tmp24, xmask)
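# Single-block persistent reduction over all 256 elements computing
# mean((1 - label) * d**2 + label * clamp(margin - d, min=0)**2) with the
# margin constant-folded to 4.0.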
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 4.0
tmp7 = tmp6 - tmp3
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp5 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
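# Driver: computes the distance map, then reduces it to the scalar loss.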
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| justinluyao/phd_thesis | ContrastiveLoss | false | 3,785 | [
"MIT"
] | 0 | 0a61f5deaac86dd34839ce24c2ad89e1411a8540 | https://github.com/justinluyao/phd_thesis/tree/0a61f5deaac86dd34839ce24c2ad89e1411a8540 | import torch
from torchvision import transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin):
super().__init__()
self.margin = margin
def forward(self, output1, output2, label):
        euclidean_distance = F.pairwise_distance(output1, output2,
            keepdim=True)
loss_contrastive = torch.mean((1 - label) * torch.pow(
euclidean_distance, 2) + label * torch.pow(torch.clamp(self.
margin - euclidean_distance, min=0.0), 2))
return loss_contrastive
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MultiHeadSelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/t4/ct4veshscig5vgvyj32uhwuca65ykr2tiu5huzxkuudzphoft4m4.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_2,), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ji/cjiuxsaveibxcx77pxybrxhp7ix53ahmzdmwwqw4rlhuo76kmhhj.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_5, 2.0), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/54/c545jcidaxblcqouel6ztiwyytwzuaasohh4q2qelpa3u5sbntsf.py
# Topologically Sorted Source Nodes: [keys_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# keys_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py
# Topologically Sorted Source Nodes: [result], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# result => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py
# Topologically Sorted Source Nodes: [result], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# result => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_3, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf0, primals_3, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [keys_1], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf0, primals_3, buf3, 64, grid=grid(64), stream=stream0)
del buf0
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [result], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [result], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, buf1, out=buf7)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [outputs_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_5
return (reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn import Dropout
from torch.nn import Linear
def masked_softmax(vector: 'torch.Tensor', mask: 'torch.Tensor', dim: 'int'=-1
) ->torch.Tensor:
"""
``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, this function returns an array
of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model
that uses categorical cross-entropy loss.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
result = torch.nn.functional.softmax(vector + (1 - mask) * -
10000000000.0, dim=dim)
return result
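def _masked_softmax_sketch():
    # Hedged usage sketch (hypothetical tensors): masked positions receive
    # ~0 probability and each row renormalizes over the unmasked entries.
    scores = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[1.0, 1.0, 0.0]])
    probs = masked_softmax(scores, mask)
    assert torch.allclose(probs.sum(-1), torch.tensor(1.0))
    return probs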
def weighted_sum(matrix: 'torch.Tensor', attention: 'torch.Tensor'
) ->torch.Tensor:
"""
Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an
"attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical
computation performed after an attention mechanism.
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle
higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we
assume that all dimensions in the "matrix" prior to the last dimension are matched in the
"vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`.
For example, say I have a "matrix" with dimensions ``(batch_size, num_queries, num_words,
embedding_dim)``. The attention "vector" then must have at least those dimensions, and could
have more. Both:
- ``(batch_size, num_queries, num_words)`` (distribution over words for each query)
- ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a
query for each document)
are valid input "vectors", producing tensors of shape:
``(batch_size, num_queries, embedding_dim)`` and
``(batch_size, num_documents, num_queries, embedding_dim)`` respectively.
"""
if attention.dim() == 2 and matrix.dim() == 3:
return attention.unsqueeze(1).bmm(matrix).squeeze(1)
if attention.dim() == 3 and matrix.dim() == 3:
return attention.bmm(matrix)
if matrix.dim() - 1 < attention.dim():
expanded_size = list(matrix.size())
for i in range(attention.dim() - matrix.dim() + 1):
matrix = matrix.unsqueeze(1)
expanded_size.insert(i + 1, attention.size(i + 1))
matrix = matrix.expand(*expanded_size)
intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
return intermediate.sum(dim=-2)
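def _weighted_sum_sketch():
    # Hedged usage sketch (hypothetical shapes): a (batch, rows, dim) matrix
    # and a (batch, rows) attention vector reduce to a (batch, dim) summary.
    matrix = torch.rand(2, 3, 5)
    attention = torch.softmax(torch.rand(2, 3), dim=-1)
    out = weighted_sum(matrix, attention)
    assert out.shape == (2, 5)
    return out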
class MultiHeadSelfAttention(Module):
"""
This class implements the key-value scaled dot product attention mechanism
detailed in the paper `Attention is all you Need
<https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .
The attention mechanism is a weighted sum of a projection V of the inputs, with respect
to the scaled, normalised dot product of Q and K, which are also both linear projections
of the input. This procedure is repeated for each attention head, using different parameters.
Parameters
----------
num_heads : ``int``, required.
The number of attention heads to use.
input_dim : ``int``, required.
The size of the last dimension of the input tensor.
    attention_dim : ``int``, required.
The total dimension of the query and key projections which comprise the
dot product attention function. Must be divisible by ``num_heads``.
values_dim : ``int``, required.
The total dimension which the input is projected to for representing the values,
which are combined using the attention. Must be divisible by ``num_heads``.
output_projection_dim : ``int``, optional (default = None)
The dimensionality of the final output projection. If this is not passed
        explicitly, the projection has size ``input_dim``.
attention_dropout_prob : ``float``, optional (default = 0.1).
The dropout probability applied to the normalised attention
distributions.
"""
def __init__(self, input_dim: 'int', attention_dim: 'int', values_dim:
'int', num_heads: 'int'=1, output_projection_dim: 'int'=None,
attention_dropout_prob: 'float'=0.1) ->None:
super(MultiHeadSelfAttention, self).__init__()
self._num_heads = num_heads
self._input_dim = input_dim
self._output_dim = output_projection_dim or input_dim
self._attention_dim = attention_dim
self._values_dim = values_dim
if attention_dim % num_heads != 0:
raise ValueError(
f'Key size ({attention_dim}) must be divisible by the number of attention heads ({num_heads}).'
)
if values_dim % num_heads != 0:
raise ValueError(
f'Value size ({values_dim}) must be divisible by the number of attention heads ({num_heads}).'
)
self._combined_projection = Linear(input_dim, 2 * attention_dim +
values_dim)
self._scale = (input_dim // num_heads) ** 0.5
self._output_projection = Linear(values_dim, self._output_dim)
self._attention_dropout = Dropout(attention_dropout_prob)
def get_input_dim(self):
return self._input_dim
def get_output_dim(self):
return self._output_dim
def is_bidirectional(self):
return False
def forward(self, inputs: 'torch.Tensor', mask: 'torch.LongTensor'=None
) ->torch.FloatTensor:
"""
Parameters
----------
inputs : ``torch.FloatTensor``, required.
A tensor of shape (batch_size, timesteps, input_dim)
mask : ``torch.FloatTensor``, optional (default = None).
A tensor of shape (batch_size, timesteps).
Returns
-------
A tensor of shape (batch_size, timesteps, output_projection_dim),
where output_projection_dim = input_dim by default.
"""
num_heads = self._num_heads
batch_size, timesteps, _ = inputs.size()
if mask is None:
mask = inputs.new_ones(batch_size, timesteps)
combined_projection = self._combined_projection(inputs)
queries, keys, *values = combined_projection.split(self.
_attention_dim, -1)
queries = queries.contiguous()
keys = keys.contiguous()
values = torch.cat(values, -1).contiguous()
values_per_head = values.view(batch_size, timesteps, num_heads, int
(self._values_dim / num_heads))
values_per_head = values_per_head.transpose(1, 2).contiguous()
values_per_head = values_per_head.view(batch_size * num_heads,
timesteps, int(self._values_dim / num_heads))
queries_per_head = queries.view(batch_size, timesteps, num_heads,
int(self._attention_dim / num_heads))
queries_per_head = queries_per_head.transpose(1, 2).contiguous()
queries_per_head = queries_per_head.view(batch_size * num_heads,
timesteps, int(self._attention_dim / num_heads))
keys_per_head = keys.view(batch_size, timesteps, num_heads, int(
self._attention_dim / num_heads))
keys_per_head = keys_per_head.transpose(1, 2).contiguous()
keys_per_head = keys_per_head.view(batch_size * num_heads,
timesteps, int(self._attention_dim / num_heads))
scaled_similarities = torch.bmm(queries_per_head / self._scale,
keys_per_head.transpose(1, 2))
attention = masked_softmax(scaled_similarities, mask.repeat(1,
num_heads).view(batch_size * num_heads, timesteps))
attention = self._attention_dropout(attention)
outputs = weighted_sum(values_per_head, attention)
outputs = outputs.view(batch_size, num_heads, timesteps, int(self.
_values_dim / num_heads))
outputs = outputs.transpose(1, 2).contiguous()
outputs = outputs.view(batch_size, timesteps, self._values_dim)
outputs = self._output_projection(outputs)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'attention_dim': 4, 'values_dim': 4}]
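def _self_attention_sketch():
    # Hedged usage sketch matching get_init_inputs(): one head over a
    # (batch=4, timesteps=4, input_dim=4) sequence; output keeps input_dim.
    layer = MultiHeadSelfAttention(input_dim=4, attention_dim=4, values_dim=4)
    out = layer(torch.rand(4, 4, 4))
    assert out.shape == (4, 4, 4)
    return out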
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch.nn import Dropout
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
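# Reading aid (added commentary, not part of the generated kernels): kernels
# 0-2 undo the fused (..., 12)-wide projection stored in `buf0`.
# `triton_poi_fused_cat_0` gathers columns 8-11 plus their bias slice (the
# values), `triton_poi_fused_div_1` gathers columns 0-3 and pre-multiplies by
# 0.5 = 1/self._scale (the scaled queries, since scale = sqrt(4) = 2 here),
# and `triton_poi_fused_clone_2` gathers columns 4-7 (the keys).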
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
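# Reading aid (added commentary): kernels 3 and 4 together implement a
# numerically stable softmax over the trailing dimension of size 4 in two
# passes -- kernel 3 subtracts the per-row maximum and exponentiates, and
# kernel 4 divides each element by the per-row sum of exponentials.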
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](buf0, primals_3, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_1[grid(64)](buf0, primals_3, buf2, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(64)](buf0, primals_3, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (4, 4, 4), (16, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_4[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = buf5
del buf5
extern_kernels.bmm(buf6, buf1, out=buf7)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_5
return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0
), primals_4, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), buf3
def masked_softmax(vector: 'torch.Tensor', mask: 'torch.Tensor', dim: 'int'=-1
) ->torch.Tensor:
"""
``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, this function returns an array
of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model
that uses categorical cross-entropy loss.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
result = torch.nn.functional.softmax(vector + (1 - mask) * -
10000000000.0, dim=dim)
return result
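# Minimal worked example of the additive-mask trick above (illustrative
# values, not from the original source): masked positions receive a huge
# negative logit, so they end up with ~0 probability mass.
#
#   >>> v = torch.tensor([1.0, 2.0, 3.0])
#   >>> m = torch.tensor([1.0, 1.0, 0.0])
#   >>> masked_softmax(v, m)
#   tensor([0.2689, 0.7311, 0.0000])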
def weighted_sum(matrix: 'torch.Tensor', attention: 'torch.Tensor'
) ->torch.Tensor:
"""
Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an
"attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical
computation performed after an attention mechanism.
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle
higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we
assume that all dimensions in the "matrix" prior to the last dimension are matched in the
"vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`.
For example, say I have a "matrix" with dimensions ``(batch_size, num_queries, num_words,
embedding_dim)``. The attention "vector" then must have at least those dimensions, and could
have more. Both:
- ``(batch_size, num_queries, num_words)`` (distribution over words for each query)
- ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a
query for each document)
are valid input "vectors", producing tensors of shape:
``(batch_size, num_queries, embedding_dim)`` and
``(batch_size, num_documents, num_queries, embedding_dim)`` respectively.
"""
if attention.dim() == 2 and matrix.dim() == 3:
return attention.unsqueeze(1).bmm(matrix).squeeze(1)
if attention.dim() == 3 and matrix.dim() == 3:
return attention.bmm(matrix)
if matrix.dim() - 1 < attention.dim():
expanded_size = list(matrix.size())
for i in range(attention.dim() - matrix.dim() + 1):
matrix = matrix.unsqueeze(1)
expanded_size.insert(i + 1, attention.size(i + 1))
matrix = matrix.expand(*expanded_size)
intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
return intermediate.sum(dim=-2)
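# Shape sanity check for the 3-D branch above (tensor names and sizes are
# illustrative assumptions): with a matrix of shape (batch=2, rows=5, dim=7)
# and an attention of shape (2, 3, 5), `attention.bmm(matrix)` returns
# (2, 3, 7) -- one weighted sum over the 5 rows per attention distribution.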
class MultiHeadSelfAttentionNew(Module):
"""
This class implements the key-value scaled dot product attention mechanism
detailed in the paper `Attention is all you Need
<https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .
The attention mechanism is a weighted sum of a projection V of the inputs, with respect
to the scaled, normalised dot product of Q and K, which are also both linear projections
of the input. This procedure is repeated for each attention head, using different parameters.
Parameters
----------
num_heads : ``int``, required.
The number of attention heads to use.
input_dim : ``int``, required.
The size of the last dimension of the input tensor.
    attention_dim : ``int``, required.
The total dimension of the query and key projections which comprise the
dot product attention function. Must be divisible by ``num_heads``.
values_dim : ``int``, required.
The total dimension which the input is projected to for representing the values,
which are combined using the attention. Must be divisible by ``num_heads``.
output_projection_dim : ``int``, optional (default = None)
The dimensionality of the final output projection. If this is not passed
        explicitly, the projection has size `input_dim`.
attention_dropout_prob : ``float``, optional (default = 0.1).
The dropout probability applied to the normalised attention
distributions.
"""
def __init__(self, input_dim: 'int', attention_dim: 'int', values_dim:
'int', num_heads: 'int'=1, output_projection_dim: 'int'=None,
attention_dropout_prob: 'float'=0.1) ->None:
super(MultiHeadSelfAttentionNew, self).__init__()
self._num_heads = num_heads
self._input_dim = input_dim
self._output_dim = output_projection_dim or input_dim
self._attention_dim = attention_dim
self._values_dim = values_dim
if attention_dim % num_heads != 0:
raise ValueError(
f'Key size ({attention_dim}) must be divisible by the number of attention heads ({num_heads}).'
)
if values_dim % num_heads != 0:
raise ValueError(
f'Value size ({values_dim}) must be divisible by the number of attention heads ({num_heads}).'
)
self._combined_projection = Linear(input_dim, 2 * attention_dim +
values_dim)
self._scale = (input_dim // num_heads) ** 0.5
self._output_projection = Linear(values_dim, self._output_dim)
self._attention_dropout = Dropout(attention_dropout_prob)
def get_input_dim(self):
return self._input_dim
def get_output_dim(self):
return self._output_dim
def is_bidirectional(self):
return False
def forward(self, input_0):
primals_2 = self._combined_projection.weight
primals_3 = self._combined_projection.bias
primals_4 = self._output_projection.weight
primals_5 = self._output_projection.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
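# Hedged usage sketch (an assumption, not taken from the repository): on the
# shapes asserted in `call`, the compiled class is a drop-in replacement for
# the eager module, e.g.
#   m = MultiHeadSelfAttentionNew(input_dim=4, attention_dim=4, values_dim=4).cuda()
#   out = m(torch.rand(4, 4, 4, device='cuda'))  # -> (batch, timesteps, 4)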
| jsonW0/StrokeOrderEmbeddings | MultiHeadSelfAttention | false | 3,786 | [
"Apache-2.0"
] | 0 | aa73b216a118de2efba1d299b96990ba9244fa3f | https://github.com/jsonW0/StrokeOrderEmbeddings/tree/aa73b216a118de2efba1d299b96990ba9244fa3f | from torch.nn import Module
import torch
from torch.nn import Dropout
from torch.nn import Linear
def masked_softmax(vector: 'torch.Tensor', mask: 'torch.Tensor', dim: 'int'=-1
) ->torch.Tensor:
"""
``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, this function returns an array
of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model
that uses categorical cross-entropy loss.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
result = torch.nn.functional.softmax(vector + (1 - mask) * -
10000000000.0, dim=dim)
return result
def weighted_sum(matrix: 'torch.Tensor', attention: 'torch.Tensor'
) ->torch.Tensor:
"""
Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an
"attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical
computation performed after an attention mechanism.
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle
higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we
assume that all dimensions in the "matrix" prior to the last dimension are matched in the
"vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`.
For example, say I have a "matrix" with dimensions ``(batch_size, num_queries, num_words,
embedding_dim)``. The attention "vector" then must have at least those dimensions, and could
have more. Both:
- ``(batch_size, num_queries, num_words)`` (distribution over words for each query)
- ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a
query for each document)
are valid input "vectors", producing tensors of shape:
``(batch_size, num_queries, embedding_dim)`` and
``(batch_size, num_documents, num_queries, embedding_dim)`` respectively.
"""
if attention.dim() == 2 and matrix.dim() == 3:
return attention.unsqueeze(1).bmm(matrix).squeeze(1)
if attention.dim() == 3 and matrix.dim() == 3:
return attention.bmm(matrix)
if matrix.dim() - 1 < attention.dim():
expanded_size = list(matrix.size())
for i in range(attention.dim() - matrix.dim() + 1):
matrix = matrix.unsqueeze(1)
expanded_size.insert(i + 1, attention.size(i + 1))
matrix = matrix.expand(*expanded_size)
intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
return intermediate.sum(dim=-2)
class Model(Module):
"""
This class implements the key-value scaled dot product attention mechanism
detailed in the paper `Attention is all you Need
<https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .
The attention mechanism is a weighted sum of a projection V of the inputs, with respect
to the scaled, normalised dot product of Q and K, which are also both linear projections
of the input. This procedure is repeated for each attention head, using different parameters.
Parameters
----------
# ... truncated (>4000 chars) for memory efficiency |
CustomGruCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3o/c3o3rsqvwtwazhhofhyvvpsx3gxpac2vdmskpikbppasjhmmc5gs.py
# Topologically Sorted Source Nodes: [add, resetgate, add_1, inputgate, mul, add_2, newgate, sub, mul_1, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.sub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# hy => add_3
# inputgate => sigmoid_1
# mul => mul
# mul_1 => mul_1
# newgate => tanh
# resetgate => sigmoid
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %view_11), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, %mul), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_6, %tanh), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, %mul_1), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sub_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sub_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sub_tanh_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr1 + (x2), xmask)
tmp9 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), xmask)
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr6 + (x2), xmask)
tmp17 = tl.load(in_ptr7 + (x2), xmask)
tmp21 = tl.load(in_ptr8 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp7 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = libdevice.tanh(tmp19)
tmp22 = tmp21 - tmp20
tmp23 = tmp15 * tmp22
tmp24 = tmp20 + tmp23
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
tl.store(in_out_ptr1 + (x2), tmp15, xmask)
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
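# Reading aid (added commentary): in the fused kernel above, tmp7 and tmp15
# are the reset and update gates (sigmoids of the summed input/hidden
# projections plus their biases), tmp20 = tanh(i_n + resetgate * h_n) is the
# candidate state, and tmp24 = newgate + inputgate * (h - newgate) matches
# `hy` in the eager forward.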
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf3)
del primals_9
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [i_n], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_11
del primals_12
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_13
del primals_14
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf7 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, resetgate, add_1, inputgate, mul, add_2, newgate, sub, mul_1, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_tanh_0.run(buf6, buf7, primals_2, buf1, primals_5, primals_8, buf3, primals_10, buf4, buf5, primals_6, buf8, 256, grid=grid(256), stream=stream0)
del buf1
del buf3
del primals_10
del primals_2
del primals_5
del primals_8
return (buf8, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4, buf5, buf6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
class CustomGruCell(nn.Module):
"""
    A forward-only GRU cell.
Input should be: (sequence length x batch size x input_size).
The output is the output of the final forward call.
It's not clear if it would be possible to use the output from each cell in a Plan
because of the assumptions of 2D tensors in backprop.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(CustomGruCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
self.init_parameters()
def init_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, h):
i_r = self.fc_ir(x)
h_r = self.fc_hr(h)
i_z = self.fc_iz(x)
h_z = self.fc_hz(h)
i_n = self.fc_in(x)
h_n = self.fc_hn(h)
resetgate = (i_r + h_r).sigmoid()
inputgate = (i_z + h_z).sigmoid()
newgate = (i_n + resetgate * h_n).tanh()
hy = newgate + inputgate * (h - newgate)
return hy
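# Reading aid (added, biases omitted for brevity): the forward above is the
# standard GRU update,
#   r_t = sigmoid(W_ir x_t + W_hr h_{t-1})
#   z_t = sigmoid(W_iz x_t + W_hz h_{t-1})
#   n_t = tanh(W_in x_t + r_t * (W_hn h_{t-1}))
#   h_t = n_t + z_t * (h_{t-1} - n_t)   # equivalently (1 - z_t)*n_t + z_t*h_{t-1}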
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr1 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, xmask)
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr6 + x2, xmask)
tmp17 = tl.load(in_ptr7 + x2, xmask)
tmp21 = tl.load(in_ptr8 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp7 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = libdevice.tanh(tmp19)
tmp22 = tmp21 - tmp20
tmp23 = tmp15 * tmp22
tmp24 = tmp20 + tmp23
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(in_out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr0 + x2, tmp24, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf3)
del primals_9
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_12, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4),
0), alpha=1, beta=1, out=buf4)
del primals_11
del primals_12
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4),
0), alpha=1, beta=1, out=buf5)
del primals_13
del primals_14
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_tanh_0[grid(256)](buf6, buf7,
primals_2, buf1, primals_5, primals_8, buf3, primals_10, buf4,
buf5, primals_6, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf3
del primals_10
del primals_2
del primals_5
del primals_8
return buf8, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf4, buf5, buf6, buf7
class CustomGruCellNew(nn.Module):
"""
    A forward-only GRU cell.
Input should be: (sequence length x batch size x input_size).
The output is the output of the final forward call.
It's not clear if it would be possible to use the output from each cell in a Plan
because of the assumptions of 2D tensors in backprop.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(CustomGruCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
self.init_parameters()
def init_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, input_0, input_1):
primals_1 = self.fc_ir.weight
primals_2 = self.fc_ir.bias
primals_4 = self.fc_hr.weight
primals_5 = self.fc_hr.bias
primals_7 = self.fc_iz.weight
primals_8 = self.fc_iz.bias
primals_9 = self.fc_hz.weight
primals_10 = self.fc_hz.bias
primals_11 = self.fc_in.weight
primals_12 = self.fc_in.bias
primals_13 = self.fc_hn.weight
primals_14 = self.fc_hn.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
| juharris/PySyft | CustomGruCell | false | 3,787 | [
"Apache-2.0"
] | 0 | dbb70f24cc55a7dca032fb06f1a8662cb15092a9 | https://github.com/juharris/PySyft/tree/dbb70f24cc55a7dca032fb06f1a8662cb15092a9 | import torch
import numpy as np
from torch import nn
class Model(nn.Module):
"""
    A forward-only GRU cell.
Input should be: (sequence length x batch size x input_size).
The output is the output of the final forward call.
It's not clear if it would be possible to use the output from each cell in a Plan
because of the assumptions of 2D tensors in backprop.
"""
def __init__(self, input_size, hidden_size, bias=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
self.init_parameters()
def init_parameters(self):
std = 1.0 / np.sqrt(self.hidden_size)
for w in self.parameters():
w.data.uniform_(-std, std)
def forward(self, x, h):
i_r = self.fc_ir(x)
h_r = self.fc_hr(h)
i_z = self.fc_iz(x)
h_z = self.fc_hz(h)
i_n = self.fc_in(x)
h_n = self.fc_hn(h)
resetgate = (i_r + h_r).sigmoid()
inputgate = (i_z + h_z).sigmoid()
newgate = (i_n + resetgate * h_n).tanh()
hy = newgate + inputgate * (h - newgate)
return hy
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
EncoderImagePrecomp | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/eh/cehub2x5hwfjubqlrp676sb4cg4m75tyrw25e27srzpwx5wvmztm.py
# Topologically Sorted Source Nodes: [X], Original ATen: [aten.div]
# Source node to ATen node mapping:
# X => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
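# Reading aid (added commentary): for the (4, 4, 4, 4) layout with strides
# (64, 16, 4, 1), the four loads at offsets x0 + 64*x2 + {0, 16, 32, 48} walk
# dimension 1, so tmp12 is the L2 norm over dim 1 and tmp13 reproduces
# `l2norm()` elementwise.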
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [X], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init
def l2norm(X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
a = norm.expand_as(X)
X = torch.div(X, a)
return X
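# Worked example (illustrative values, not from the source): for
# X = torch.tensor([[3.0, 4.0]]), the squared sum over dim=1 is 25, its sqrt
# is 5, and l2norm(X) == tensor([[0.6, 0.8]]) -- each slice along dim 1 ends
# up unit-length.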
class EncoderImagePrecomp(nn.Module):
def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
super(EncoderImagePrecomp, self).__init__()
self.embed_size = embed_size
self.no_imgnorm = no_imgnorm
self.use_abs = use_abs
self.fc = False
if img_dim != embed_size or True:
self.fc = nn.Linear(img_dim, embed_size)
self.init_weights()
def init_weights(self):
"""Xavier initialization for the fully connected layer
"""
if self.fc:
r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.
out_features)
self.fc.weight.data.uniform_(-r, r)
self.fc.bias.data.fill_(0)
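    # Reading aid (added): with in_features = out_features = 4 as in
    # get_init_inputs(), the Xavier bound is r = sqrt(6)/sqrt(4 + 4) ~= 0.866,
    # so weights start uniformly in (-0.866, 0.866).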
def forward(self, images):
"""Extract image feature vectors."""
features = images
if self.fc:
features = self.fc(images)
if not self.no_imgnorm:
features = l2norm(features)
if self.use_abs:
features = torch.abs(features)
return features
def load_state_dict(self, state_dict):
"""Copies parameters. overwritting the default one to
accept state_dict from Full model
"""
own_state = self.state_dict()
new_state = OrderedDict()
for name, param in state_dict.items():
if name in own_state:
new_state[name] = param
super(EncoderImagePrecomp, self).load_state_dict(new_state)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'img_dim': 4, 'embed_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0
def l2norm(X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
a = norm.expand_as(X)
X = torch.div(X, a)
return X
class EncoderImagePrecompNew(nn.Module):
def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
super(EncoderImagePrecompNew, self).__init__()
self.embed_size = embed_size
self.no_imgnorm = no_imgnorm
self.use_abs = use_abs
self.fc = False
if img_dim != embed_size or True:
self.fc = nn.Linear(img_dim, embed_size)
self.init_weights()
def init_weights(self):
"""Xavier initialization for the fully connected layer
"""
if self.fc:
r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.
out_features)
self.fc.weight.data.uniform_(-r, r)
self.fc.bias.data.fill_(0)
def load_state_dict(self, state_dict):
"""Copies parameters. overwritting the default one to
accept state_dict from Full model
"""
own_state = self.state_dict()
new_state = OrderedDict()
for name, param in state_dict.items():
if name in own_state:
new_state[name] = param
super(EncoderImagePrecompNew, self).load_state_dict(new_state)
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| jwehrmann/seamretrieval | EncoderImagePrecomp | false | 3,788 | [
"Apache-2.0"
] | 0 | ff94dccc28d56ffbbb7813832c0adbab7b7c6107 | https://github.com/jwehrmann/seamretrieval/tree/ff94dccc28d56ffbbb7813832c0adbab7b7c6107 | import torch
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init
def l2norm(X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
a = norm.expand_as(X)
X = torch.div(X, a)
return X
class Model(nn.Module):
def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
super().__init__()
self.embed_size = embed_size
self.no_imgnorm = no_imgnorm
self.use_abs = use_abs
self.fc = False
if img_dim != embed_size or True:
self.fc = nn.Linear(img_dim, embed_size)
self.init_weights()
def init_weights(self):
"""Xavier initialization for the fully connected layer
"""
if self.fc:
r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.
out_features)
self.fc.weight.data.uniform_(-r, r)
self.fc.bias.data.fill_(0)
def forward(self, images):
"""Extract image feature vectors."""
features = images
if self.fc:
features = self.fc(images)
if not self.no_imgnorm:
features = l2norm(features)
if self.use_abs:
features = torch.abs(features)
return features
def load_state_dict(self, state_dict):
"""Copies parameters. overwritting the default one to
accept state_dict from Full model
"""
own_state = self.state_dict()
new_state = OrderedDict()
for name, param in state_dict.items():
if name in own_state:
new_state[name] = param
super(EncoderImagePrecomp, self).load_state_dict(new_state)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ATANLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jp/cjpowvr7og7tjmnxf4xmk4stfnnkrxmwfsul6tktucio6zkvgte7.py
# Topologically Sorted Source Nodes: [sub, abs_1, atan, loss], Original ATen: [aten.sub, aten.abs, aten.atan, aten.mean]
# Source node to ATen node mapping:
# abs_1 => abs_1
# atan => atan
# loss => mean
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %atan : [num_users=1] = call_function[target=torch.ops.aten.atan.default](args = (%abs_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%atan,), kwargs = {})
triton_per_fused_abs_atan_mean_sub_0 = async_compile.triton('triton_per_fused_abs_atan_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_atan_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_atan_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = libdevice.atan(tmp3)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, abs_1, atan, loss], Original ATen: [aten.sub, aten.abs, aten.atan, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_atan_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ATANLoss(nn.Module):
def __init__(self):
super(ATANLoss, self).__init__()
def forward(self, inputs, targets):
loss = torch.mean(torch.atan(torch.abs(inputs - targets)))
return loss
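# Worked example (illustrative values): for inputs == targets the loss is 0;
# for a constant residual of 1.0 it is atan(1.0) = pi/4 ~= 0.7854. Because
# atan saturates at pi/2, large residuals contribute a bounded penalty,
# unlike L1 or MSE.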
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_atan_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = libdevice.atan(tmp3)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_atan_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class ATANLossNew(nn.Module):
def __init__(self):
super(ATANLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| kamomehz/waveletCodingCNN | ATANLoss | false | 3,789 | [
"MIT"
] | 0 | 50c7db9d986039ded38999b7e4f4265e2250fb90 | https://github.com/kamomehz/waveletCodingCNN/tree/50c7db9d986039ded38999b7e4f4265e2250fb90 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs, targets):
loss = torch.mean(torch.atan(torch.abs(inputs - targets)))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Hidden2DiscreteDeal | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [log_pz], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_pz => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/32/c32vfxouqe74ea5scuzrdhpd7r6adxwu4bzarm4icjfnb47jbizg.py
# Topologically Sorted Source Nodes: [log_pz], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_pz => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_pz], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_pz], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init
class Hidden2DiscreteDeal(nn.Module):
def __init__(self, input_size, z_size, is_lstm=False, has_bias=True):
super(Hidden2DiscreteDeal, self).__init__()
self.z_size = z_size
latent_size = self.z_size
if is_lstm:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.p_c = nn.Linear(input_size, latent_size, bias=has_bias)
else:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.is_lstm = is_lstm
def forward(self, inputs, mask=None):
"""
:param inputs: batch_size x input_size
:return:
"""
if self.is_lstm:
h, c = inputs
if h.dim() == 3:
h = h.squeeze(0)
c = c.squeeze(0)
logits = self.p_h(h) + self.p_c(c)
else:
logits = self.p_h(inputs)
log_pz = F.log_softmax(logits, dim=-1)
return logits, log_pz
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'z_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
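# The fused log-softmax over the last dim (size 4) is split into two
# pointwise passes: kernel 0 subtracts the per-row max for numerical
# stability, kernel 1 subtracts log(sum(exp(.))) of the shifted values.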
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
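# call(): extern addmm computes logits = x @ W^T + b on the flattened
# (64, 4) view of the input, then the two kernels above turn the logits
# into log-probabilities without materializing a softmax.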
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class Hidden2DiscreteDealNew(nn.Module):
def __init__(self, input_size, z_size, is_lstm=False, has_bias=True):
super(Hidden2DiscreteDealNew, self).__init__()
self.z_size = z_size
latent_size = self.z_size
if is_lstm:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.p_c = nn.Linear(input_size, latent_size, bias=has_bias)
else:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.is_lstm = is_lstm
def forward(self, input_0):
primals_1 = self.p_h.weight
primals_2 = self.p_h.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
| justinchiu/NeuralDialog | Hidden2DiscreteDeal | false | 3,790 | [
"Apache-2.0"
] | 0 | f272cc2e12ffdd44c94263ee373208a22c057129 | https://github.com/justinchiu/NeuralDialog/tree/f272cc2e12ffdd44c94263ee373208a22c057129 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init
class Model(nn.Module):
def __init__(self, input_size, z_size, is_lstm=False, has_bias=True):
super().__init__()
self.z_size = z_size
latent_size = self.z_size
if is_lstm:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.p_c = nn.Linear(input_size, latent_size, bias=has_bias)
else:
self.p_h = nn.Linear(input_size, latent_size, bias=has_bias)
self.is_lstm = is_lstm
def forward(self, inputs, mask=None):
"""
:param inputs: batch_size x input_size
:return:
"""
if self.is_lstm:
h, c = inputs
if h.dim() == 3:
h = h.squeeze(0)
c = c.squeeze(0)
logits = self.p_h(h) + self.p_c(c)
else:
logits = self.p_h(inputs)
log_pz = F.log_softmax(logits, dim=-1)
return logits, log_pz
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
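# Hedged usage sketch (illustrative, not from the repo): the module returns
# raw logits together with their log-softmax, so exponentiating log_pz gives
# rows that sum to 1 along the last dimension.
if __name__ == "__main__":
    m = Model(input_size=4, z_size=4)
    logits, log_pz = m(torch.rand(4, 4, 4, 4))
    print(torch.allclose(log_pz.exp().sum(-1), torch.ones(4, 4, 4)))  # True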
|
ConvDenoiser | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tk/ctkr5j63ngqpsdf5zpn24uwouxiguv7jseip5x6trqjye5gti4rn.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pw/cpwa7w2sf2mh2epi2372qi4mhvq3tcs5lmubo3et2g2uwsicveur.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hj/chjlqcqvgvftjyvbhh2zc7atigvsupfk4jlzoprjv26edpy76f2g.py
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_4 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qz/cqzdjo4jhc46uyef5m3lmuwlsxitoqbcxdyr572akndmo2bcqnxs.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fh/cfhpvwholq5gmaw2tbecq74hgrzakstjjxyjcynloacfs64vfrpc.py
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_8], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d_2 => convolution_5
# x_8 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 540800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4225) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lx/clxq7ah7rykcs3m4foszxqrios4h7nyghvtzm4aujyh4c6xgrbq5.py
# Topologically Sorted Source Nodes: [conv2d_3, x_9], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv2d_3 => convolution_6
# x_9 => sigmoid
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_sigmoid_7 = async_compile.triton('triton_poi_fused_convolution_sigmoid_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50700
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4225) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (8, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (8, ), (1, ))
assert_size_stride(primals_8, (8, 8, 2, 2), (32, 4, 2, 1))
assert_size_stride(primals_9, (8, ), (1, ))
assert_size_stride(primals_10, (8, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_11, (16, ), (1, ))
assert_size_stride(primals_12, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32, ), (1, ))
assert_size_stride(primals_14, (3, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_15, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 65536, grid=grid(65536), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 8, 16, 16), (2048, 256, 16, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 8192, grid=grid(8192), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32)
buf11 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 2048, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 8, 16, 16), (2048, 256, 16, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_9, 8192, grid=grid(8192), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf15, primals_11, 65536, grid=grid(65536), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_12, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 32, 65, 65), (135200, 4225, 65, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf17, primals_13, 540800, grid=grid(540800), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 3, 65, 65), (12675, 4225, 65, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_9], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_7.run(buf19, primals_15, 50700, grid=grid(50700), stream=stream0)
del primals_15
return (buf19, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((8, 8, 2, 2), (32, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((8, 16, 2, 2), (64, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((3, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.init
import torch.nn as nn
import torch.nn.functional as F
class ConvDenoiser(nn.Module):
def __init__(self):
super(ConvDenoiser, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
self.conv3 = nn.Conv2d(16, 8, 3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.t_conv1 = nn.ConvTranspose2d(8, 8, 2, stride=2)
self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2)
self.t_conv3 = nn.ConvTranspose2d(16, 32, 3, stride=2)
self.conv_out = nn.Conv2d(32, 3, 3, padding=1)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(x)
x = F.relu(self.conv2(x))
x = self.pool(x)
x = F.relu(self.conv3(x))
x = self.pool(x)
x = F.relu(self.t_conv1(x))
x = F.relu(self.t_conv2(x))
x = F.relu(self.t_conv3(x))
        x = torch.sigmoid(self.conv_out(x))
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.init
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
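# Pattern shared by the fused conv+relu kernels below: the convolution itself
# runs through extern_kernels.convolution, and these pointwise kernels add the
# per-channel bias (broadcast via the channel index x1) and apply ReLU in place.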
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
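# Each 2x2 max-pool kernel below stores the window max plus an int8 argmax
# code (0..3, row-major within the window), which backward uses to route
# gradients to the winning element.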
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
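# Unlike the power-of-two kernels above, 540800 = 4 * 32 * 65 * 65 is not
# divisible by the launch block size, so this kernel keeps an explicit
# xmask bounds check on every load and store.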
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 540800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4225 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
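# Final fused kernel: per-channel bias add followed by sigmoid, matching
# x = sigmoid(conv_out(x)) in the eager model; 50700 = 4 * 3 * 65 * 65.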
@triton.jit
def triton_poi_fused_convolution_sigmoid_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50700
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4225 % 3
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (8, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (8,), (1,))
assert_size_stride(primals_8, (8, 8, 2, 2), (32, 4, 2, 1))
assert_size_stride(primals_9, (8,), (1,))
assert_size_stride(primals_10, (8, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32,), (1,))
assert_size_stride(primals_14, (3, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_15, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2,
buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(65536)](buf5, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf5, buf6,
buf7, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 8, 16, 16), (2048, 256, 16, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(8192)](buf9, primals_7,
8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32
)
buf11 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(2048)](buf9, buf10,
buf11, 2048, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 8, 16, 16), (2048, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(8192)](buf13, primals_9,
8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_2[grid(65536)](buf15, primals_11,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
buf16 = extern_kernels.convolution(buf15, primals_12, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 32, 65, 65), (135200, 4225, 65, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_6[grid(540800)](buf17, primals_13,
540800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 3, 65, 65), (12675, 4225, 65, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_sigmoid_7[grid(50700)](buf19,
primals_15, 50700, XBLOCK=512, num_warps=4, num_stages=1)
del primals_15
return (buf19, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf2, buf3, buf5, buf6,
buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19)
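# Inductor convention (editorial note): the first element (buf19) is the
# network output; the remaining weights and intermediate activations are
# returned so autograd can reuse them in the backward pass instead of
# recomputing them.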
class ConvDenoiserNew(nn.Module):
def __init__(self):
super(ConvDenoiserNew, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
self.conv3 = nn.Conv2d(16, 8, 3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.t_conv1 = nn.ConvTranspose2d(8, 8, 2, stride=2)
self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2)
self.t_conv3 = nn.ConvTranspose2d(16, 32, 3, stride=2)
self.conv_out = nn.Conv2d(32, 3, 3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.t_conv1.weight
primals_9 = self.t_conv1.bias
primals_10 = self.t_conv2.weight
primals_11 = self.t_conv2.bias
primals_12 = self.t_conv3.weight
primals_13 = self.t_conv3.bias
primals_14 = self.conv_out.weight
primals_15 = self.conv_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
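# Hedged usage sketch (editorial, not from the original repo; assumes a CUDA
# device): exercises ConvDenoiserNew end to end. Note the shape asymmetry
# visible in `call` above: t_conv3 (kernel 3, stride 2) maps 32x32 feature
# maps to 65x65, so a 64x64 input yields a 65x65 output, matching
# assert_size_stride(buf18, (4, 3, 65, 65), ...).
def _example_convdenoiser_usage():
    model = ConvDenoiserNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    y = model(x)
    assert y.shape == (4, 3, 65, 65)
    return y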
| joydeba/autocount | ConvDenoiser | false | 3,791 | [
"MIT"
] | 0 | 52ddb47726fa34d5f54e2850dc6690b67c768728 | https://github.com/joydeba/autocount/tree/52ddb47726fa34d5f54e2850dc6690b67c768728 | import torch
import torch.nn.init
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
self.conv3 = nn.Conv2d(16, 8, 3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.t_conv1 = nn.ConvTranspose2d(8, 8, 2, stride=2)
self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2)
self.t_conv3 = nn.ConvTranspose2d(16, 32, 3, stride=2)
self.conv_out = nn.Conv2d(32, 3, 3, padding=1)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(x)
x = F.relu(self.conv2(x))
x = self.pool(x)
x = F.relu(self.conv3(x))
x = self.pool(x)
x = F.relu(self.t_conv1(x))
x = F.relu(self.t_conv2(x))
x = F.relu(self.t_conv3(x))
x = torch.sigmoid(self.conv_out(x))
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
SelfAttn | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/um/cum65j23qchrjf5dndblqgbw6zomhgwfj2obfidtgy7b5j3zwklm.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# scores => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wk/cwk2wao7opapqbjj7klnqrd6tgist3ts3nc5veryzhzstwpx7d4l.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# scores => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0), primals_3, out=buf4)
del buf3
return (reinterpret_tensor(buf4, (4, 4), (4, 1), 0), primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class SelfAttn(nn.Module):
"""
self-attention with learnable parameters
"""
def __init__(self, dhid):
super().__init__()
self.scorer = nn.Linear(dhid, 1)
def forward(self, inp):
scores = F.softmax(self.scorer(inp), dim=1)
cont = scores.transpose(1, 2).bmm(inp).squeeze(1)
return cont
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dhid': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
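# Editorial note: this pair of kernels implements a numerically stable
# softmax over groups of four contiguous scores (one group per batch row of
# the (4, 4, 1) logits): the kernel above computes exp(x - rowmax); the
# kernel below divides each exponentiated value by its row sum.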
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0
), primals_3, out=buf4)
del buf3
return reinterpret_tensor(buf4, (4, 4), (4, 1), 0), primals_3, buf1
class SelfAttnNew(nn.Module):
"""
self-attention with learnable parameters
"""
def __init__(self, dhid):
super().__init__()
self.scorer = nn.Linear(dhid, 1)
def forward(self, input_0):
primals_1 = self.scorer.weight
primals_2 = self.scorer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| jzhanson/alfred | SelfAttn | false | 3,792 | [
"MIT"
] | 0 | d5b540e7c9b53d3f70cc2907503935fecff00018 | https://github.com/jzhanson/alfred/tree/d5b540e7c9b53d3f70cc2907503935fecff00018 | import torch
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
"""
self-attention with learnable parameters
"""
def __init__(self, dhid):
super().__init__()
self.scorer = nn.Linear(dhid, 1)
def forward(self, inp):
scores = F.softmax(self.scorer(inp), dim=1)
cont = scores.transpose(1, 2).bmm(inp).squeeze(1)
return cont
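# Hedged shape walkthrough (illustrative, editorial): for inp of shape
# (B, T, D), scorer(inp) is (B, T, 1); softmax over dim=1 weights the T
# positions; transpose(1, 2) gives (B, 1, T); bmm with inp yields (B, 1, D);
# and squeeze(1) returns the (B, D) attention-pooled context vector.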
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
FourierConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ch/cchshlqdtpws4kgth5d3am7fk2jjo6mdnwsaylk7ndt5gaxfibfg.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 4, 0, 4], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nq/cnqwfyssvorfayi2lds23tsmu52r5byrwctfihgw574dh7womo3h.py
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 4
x4 = (xindex // 32)
x5 = xindex % 128
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (10*x1) + (80*x4)), xmask)
tmp1 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x6), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4, 2), (128, 32, 8, 2, 1))
assert_size_stride(primals_3, (4, 4, 4, 2), (32, 8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x, x_ft], Original ATen: [aten.constant_pad_nd, aten._fft_r2c]
buf1 = torch.ops.aten._fft_r2c.default(buf0, [2, 3], 0, True)
del buf0
buf2 = buf1
del buf1
# Topologically Sorted Source Nodes: [out_ft], Original ATen: [aten.zeros_like]
buf3 = torch.ops.aten.full.default([4, 4, 8, 5], 0, dtype=torch.complex64, layout=torch.strided, device=device(type='cuda', index=0), pin_memory=False)
buf4 = buf3
del buf3
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.slice]
buf5 = torch.ops.aten.slice.Tensor(buf2, 2, 0, 4)
buf6 = buf5
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.slice]
buf7 = torch.ops.aten.slice.Tensor(buf6, 3, 0, 4)
buf8 = buf7
# Topologically Sorted Source Nodes: [view_as_complex], Original ATen: [aten.view_as_complex]
buf9 = torch.ops.aten.view_as_complex.default(primals_2)
buf10 = buf9
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.unsqueeze]
buf11 = torch.ops.aten.unsqueeze.default(buf8, 4)
buf12 = buf11
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf13 = torch.ops.aten.permute.default(buf12, [0, 4, 2, 3, 1])
buf14 = buf13
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.unsqueeze]
buf15 = torch.ops.aten.unsqueeze.default(buf10, 4)
buf16 = buf15
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf17 = torch.ops.aten.permute.default(buf16, [4, 1, 2, 3, 0])
buf18 = buf17
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf19 = torch.ops.aten.permute.default(buf14, [2, 3, 0, 4, 1])
buf20 = buf19
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone]
buf21 = torch.ops.aten.clone.default(buf20, memory_format=torch.contiguous_format)
del buf11
del buf12
del buf13
del buf14
del buf19
del buf2
del buf20
del buf5
del buf6
del buf7
del buf8
buf22 = buf21
del buf21
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten._unsafe_view]
buf23 = torch.ops.aten.reshape.default(buf22, [16, 4, 4])
buf24 = buf23
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf25 = torch.ops.aten.permute.default(buf18, [2, 3, 4, 1, 0])
buf26 = buf25
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.view]
buf27 = torch.ops.aten.reshape.default(buf26, [16, 4, 4])
buf28 = buf27
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm]
buf29 = torch.ops.aten.bmm.default(buf24, buf28)
del buf10
del buf15
del buf16
del buf17
del buf18
del buf25
del buf26
del buf27
del buf28
del buf9
del primals_2
buf30 = buf29
del buf29
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.view]
buf31 = torch.ops.aten.reshape.default(buf30, [4, 4, 4, 1, 4])
buf32 = buf31
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.permute]
buf33 = torch.ops.aten.permute.default(buf32, [2, 4, 0, 1, 3])
buf34 = buf33
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.view]
buf35 = torch.ops.aten.reshape.default(buf34, [4, 4, 4, 4])
buf36 = buf35
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.slice]
buf37 = torch.ops.aten.slice.Tensor(buf4, 2, 0, 4)
buf38 = buf37
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.slice]
buf39 = torch.ops.aten.slice.Tensor(buf38, 3, 0, 4)
buf40 = buf39
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.copy]
buf41 = torch.ops.aten.copy.default(buf40, buf36)
del buf30
del buf31
del buf32
del buf33
del buf34
del buf35
del buf36
buf42 = buf41
del buf41
# Topologically Sorted Source Nodes: [], Original ATen: []
buf43 = torch.ops.aten.slice.Tensor(buf4, 2, 0, 4)
buf44 = buf43
# Topologically Sorted Source Nodes: [], Original ATen: []
buf45 = torch.ops.aten.slice_scatter.default(buf44, buf42, 3, 0, 4)
del buf42
buf46 = buf45
del buf45
# Topologically Sorted Source Nodes: [], Original ATen: []
buf47 = torch.ops.aten.slice_scatter.default(buf4, buf46, 2, 0, 4)
del buf46
buf48 = buf47
del buf47
# Topologically Sorted Source Nodes: [view_as_complex_1], Original ATen: [aten.view_as_complex]
buf49 = torch.ops.aten.view_as_complex.default(primals_3)
buf50 = buf49
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.slice]
buf51 = torch.ops.aten.slice.Tensor(buf48, 2, 0, 4)
buf52 = buf51
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.slice]
buf53 = torch.ops.aten.slice.Tensor(buf52, 3, 0, 4)
buf54 = buf53
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
buf55 = torch.ops.aten.view.dtype(buf54, torch.float32)
buf56 = buf55
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
buf57 = torch.ops.aten.view.dtype(buf50, torch.float32)
buf58 = buf57
buf59 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf56, buf58, buf59, 512, grid=grid(512), stream=stream0)
del buf49
del buf50
del buf51
del buf52
del buf53
del buf54
del buf55
del buf56
del buf57
del buf58
del primals_3
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.add]
buf60 = torch.ops.aten.view.dtype(reinterpret_tensor(buf59, (4, 4, 4, 8), (128, 32, 8, 1), 0), torch.complex64)
buf61 = buf60
# Topologically Sorted Source Nodes: [], Original ATen: []
buf62 = torch.ops.aten.slice.Tensor(buf48, 2, 0, 4)
buf63 = buf62
# Topologically Sorted Source Nodes: [], Original ATen: []
buf64 = torch.ops.aten.slice_scatter.default(buf63, buf61, 3, 0, 4)
del buf59
del buf60
del buf61
del buf62
del buf63
buf65 = buf64
del buf64
# Topologically Sorted Source Nodes: [], Original ATen: []
buf66 = torch.ops.aten.slice_scatter.default(buf48, buf65, 2, 0, 4)
del buf48
del buf65
buf67 = buf66
del buf66
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.slice]
buf68 = torch.ops.aten.slice.Tensor(buf67, 2, 0, 4)
buf69 = buf68
# Topologically Sorted Source Nodes: [iadd], Original ATen: [aten.slice]
buf70 = torch.ops.aten.slice.Tensor(buf69, 3, 0, 4)
buf71 = buf70
# Topologically Sorted Source Nodes: [], Original ATen: []
buf72 = torch.ops.aten.slice.Tensor(buf67, 2, 0, 4)
buf73 = buf72
# Topologically Sorted Source Nodes: [], Original ATen: []
buf74 = torch.ops.aten.slice_scatter.default(buf73, buf71, 3, 0, 4)
del buf68
del buf69
del buf70
del buf71
del buf72
del buf73
buf75 = buf74
del buf74
# Topologically Sorted Source Nodes: [], Original ATen: []
buf76 = torch.ops.aten.slice_scatter.default(buf67, buf75, 2, 0, 4)
del buf67
del buf75
buf77 = buf76
del buf76
# Topologically Sorted Source Nodes: [out], Original ATen: [aten._fft_c2r]
buf78 = torch.ops.aten._fft_c2r.default(buf77, [2, 3], 2, 8)
del buf77
buf79 = buf78
del buf78
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
buf80 = torch.ops.aten.permute.default(buf24, [0, 2, 1])
buf81 = buf80
# Topologically Sorted Source Nodes: [], Original ATen: [aten._conj]
buf82 = torch.ops.aten._conj.default(buf81)
buf83 = buf82
return (reinterpret_tensor(buf79, (4, 4, 4, 4), (256, 64, 8, 1), 0), buf4, buf83, )
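# Editorial note: alongside the real-valued output, `call` hands back out_ft
# (buf4) and buf83, the conjugated transpose of the input-spectrum operand of
# the bmm, presumably the pieces Inductor saves for the backward pass of the
# complex einsum.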
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 2), (32, 8, 2, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class FourierConv2d(torch.nn.Module):
def __init__(self, in_channels, out_channels, size_x, size_y, bias=True,
periodic=False):
super(FourierConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if not periodic:
self.size_x = size_x
self.size_y = size_y
else:
self.size_x = size_x // 2
self.size_y = size_y // 2
self.weights = torch.nn.Parameter(torch.view_as_real(1 / (
in_channels * out_channels) * torch.rand(in_channels,
out_channels, self.size_x, self.size_y, dtype=torch.cfloat)))
self.biases = torch.nn.Parameter(torch.view_as_real(1 /
out_channels * torch.rand(out_channels, self.size_x, self.
size_y, dtype=torch.cfloat)))
self.bias = bias
self.periodic = periodic
def forward(self, x):
if not self.periodic:
x = torch.nn.functional.pad(x, [0, self.size_y, 0, self.size_x])
x_ft = torch.fft.rfft2(x)
out_ft = torch.zeros_like(x_ft)
out_ft[:, :, :self.size_x, :self.size_y] = torch.einsum(
'bixy,ioxy->boxy', x_ft[:, :, :self.size_x, :self.size_y],
torch.view_as_complex(self.weights))
if self.bias:
out_ft[:, :, :self.size_x, :self.size_y] += torch.view_as_complex(
self.biases)
out = torch.fft.irfft2(out_ft)
if not self.periodic:
out = out[..., :self.size_x, :self.size_y]
return out
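# Hedged reference (illustrative, not part of the repo; the helper name is
# hypothetical): the einsum 'bixy,ioxy->boxy' above is an independent complex
# (in -> out) matrix multiply at each retained Fourier mode. A loop-based
# equivalent over the kept modes, with W = torch.view_as_complex(weights):
def _spectral_mul_reference(x_ft, weights, size_x, size_y):
    W = torch.view_as_complex(weights)  # (in, out, size_x, size_y), complex
    out = torch.zeros(x_ft.shape[0], W.shape[1], size_x, size_y,
                      dtype=x_ft.dtype, device=x_ft.device)
    for kx in range(size_x):
        for ky in range(size_y):
            # (batch, in) @ (in, out) -> (batch, out) at this single mode
            out[:, :, kx, ky] = x_ft[:, :, kx, ky] @ W[:, :, kx, ky]
    return out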
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'size_x': 4, 'size_y': 4}]
| import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 4
x4 = xindex // 32
x5 = xindex % 128
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 10 * x1 + 80 * x4), xmask)
tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x6, tmp2, xmask)
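# Indexing note (editorial): in_ptr0 is the complex (4, 4, 8, 5) spectrum
# viewed as float32, so the 4 retained modes per row span 8 floats (x0) with
# a row stride of 10 floats (5 complex values * 2) and a per-channel stride
# of 80; in_ptr1 is the (4, 4, 4, 2)-real bias, broadcast over the batch via
# x5 = xindex % 128.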
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4, 2), (128, 32, 8, 2, 1))
assert_size_stride(primals_3, (4, 4, 4, 2), (32, 8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = torch.ops.aten._fft_r2c.default(buf0, [2, 3], 0, True)
del buf0
buf2 = buf1
del buf1
buf3 = torch.ops.aten.full.default([4, 4, 8, 5], 0, dtype=torch.
complex64, layout=torch.strided, device=device(type='cuda',
index=0), pin_memory=False)
buf4 = buf3
del buf3
buf5 = torch.ops.aten.slice.Tensor(buf2, 2, 0, 4)
buf6 = buf5
buf7 = torch.ops.aten.slice.Tensor(buf6, 3, 0, 4)
buf8 = buf7
buf9 = torch.ops.aten.view_as_complex.default(primals_2)
buf10 = buf9
buf11 = torch.ops.aten.unsqueeze.default(buf8, 4)
buf12 = buf11
buf13 = torch.ops.aten.permute.default(buf12, [0, 4, 2, 3, 1])
buf14 = buf13
buf15 = torch.ops.aten.unsqueeze.default(buf10, 4)
buf16 = buf15
buf17 = torch.ops.aten.permute.default(buf16, [4, 1, 2, 3, 0])
buf18 = buf17
buf19 = torch.ops.aten.permute.default(buf14, [2, 3, 0, 4, 1])
buf20 = buf19
buf21 = torch.ops.aten.clone.default(buf20, memory_format=torch.
contiguous_format)
del buf11
del buf12
del buf13
del buf14
del buf19
del buf2
del buf20
del buf5
del buf6
del buf7
del buf8
buf22 = buf21
del buf21
buf23 = torch.ops.aten.reshape.default(buf22, [16, 4, 4])
buf24 = buf23
buf25 = torch.ops.aten.permute.default(buf18, [2, 3, 4, 1, 0])
buf26 = buf25
buf27 = torch.ops.aten.reshape.default(buf26, [16, 4, 4])
buf28 = buf27
buf29 = torch.ops.aten.bmm.default(buf24, buf28)
del buf10
del buf15
del buf16
del buf17
del buf18
del buf25
del buf26
del buf27
del buf28
del buf9
del primals_2
buf30 = buf29
del buf29
buf31 = torch.ops.aten.reshape.default(buf30, [4, 4, 4, 1, 4])
buf32 = buf31
buf33 = torch.ops.aten.permute.default(buf32, [2, 4, 0, 1, 3])
buf34 = buf33
buf35 = torch.ops.aten.reshape.default(buf34, [4, 4, 4, 4])
buf36 = buf35
buf37 = torch.ops.aten.slice.Tensor(buf4, 2, 0, 4)
buf38 = buf37
buf39 = torch.ops.aten.slice.Tensor(buf38, 3, 0, 4)
buf40 = buf39
buf41 = torch.ops.aten.copy.default(buf40, buf36)
del buf30
del buf31
del buf32
del buf33
del buf34
del buf35
del buf36
buf42 = buf41
del buf41
buf43 = torch.ops.aten.slice.Tensor(buf4, 2, 0, 4)
buf44 = buf43
buf45 = torch.ops.aten.slice_scatter.default(buf44, buf42, 3, 0, 4)
del buf42
buf46 = buf45
del buf45
buf47 = torch.ops.aten.slice_scatter.default(buf4, buf46, 2, 0, 4)
del buf46
buf48 = buf47
del buf47
buf49 = torch.ops.aten.view_as_complex.default(primals_3)
buf50 = buf49
buf51 = torch.ops.aten.slice.Tensor(buf48, 2, 0, 4)
buf52 = buf51
buf53 = torch.ops.aten.slice.Tensor(buf52, 3, 0, 4)
buf54 = buf53
buf55 = torch.ops.aten.view.dtype(buf54, torch.float32)
buf56 = buf55
buf57 = torch.ops.aten.view.dtype(buf50, torch.float32)
buf58 = buf57
buf59 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1),
torch.float32)
triton_poi_fused_add_1[grid(512)](buf56, buf58, buf59, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del buf49
del buf50
del buf51
del buf52
del buf53
del buf54
del buf55
del buf56
del buf57
del buf58
del primals_3
buf60 = torch.ops.aten.view.dtype(reinterpret_tensor(buf59, (4, 4,
4, 8), (128, 32, 8, 1), 0), torch.complex64)
buf61 = buf60
buf62 = torch.ops.aten.slice.Tensor(buf48, 2, 0, 4)
buf63 = buf62
buf64 = torch.ops.aten.slice_scatter.default(buf63, buf61, 3, 0, 4)
del buf59
del buf60
del buf61
del buf62
del buf63
buf65 = buf64
del buf64
buf66 = torch.ops.aten.slice_scatter.default(buf48, buf65, 2, 0, 4)
del buf48
del buf65
buf67 = buf66
del buf66
buf68 = torch.ops.aten.slice.Tensor(buf67, 2, 0, 4)
buf69 = buf68
buf70 = torch.ops.aten.slice.Tensor(buf69, 3, 0, 4)
buf71 = buf70
buf72 = torch.ops.aten.slice.Tensor(buf67, 2, 0, 4)
buf73 = buf72
buf74 = torch.ops.aten.slice_scatter.default(buf73, buf71, 3, 0, 4)
del buf68
del buf69
del buf70
del buf71
del buf72
del buf73
buf75 = buf74
del buf74
buf76 = torch.ops.aten.slice_scatter.default(buf67, buf75, 2, 0, 4)
del buf67
del buf75
buf77 = buf76
del buf76
buf78 = torch.ops.aten._fft_c2r.default(buf77, [2, 3], 2, 8)
del buf77
buf79 = buf78
del buf78
buf80 = torch.ops.aten.permute.default(buf24, [0, 2, 1])
buf81 = buf80
buf82 = torch.ops.aten._conj.default(buf81)
buf83 = buf82
return reinterpret_tensor(buf79, (4, 4, 4, 4), (256, 64, 8, 1), 0
), buf4, buf83
class FourierConv2dNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, size_x, size_y, bias=True,
periodic=False):
super(FourierConv2dNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if not periodic:
self.size_x = size_x
self.size_y = size_y
else:
self.size_x = size_x // 2
self.size_y = size_y // 2
self.weights = torch.nn.Parameter(torch.view_as_real(1 / (
in_channels * out_channels) * torch.rand(in_channels,
out_channels, self.size_x, self.size_y, dtype=torch.cfloat)))
self.biases = torch.nn.Parameter(torch.view_as_real(1 /
out_channels * torch.rand(out_channels, self.size_x, self.
size_y, dtype=torch.cfloat)))
self.bias = bias
self.periodic = periodic
def forward(self, input_0):
primals_2 = self.weights
primals_3 = self.biases
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| julian-parker/DAFX22_FNO | FourierConv2d | false | 3,793 | [
"MIT"
] | 0 | 72f30144317a3f8ba8ea23ecf9a0333c81fc87db | https://github.com/julian-parker/DAFX22_FNO/tree/72f30144317a3f8ba8ea23ecf9a0333c81fc87db | import torch
class Model(torch.nn.Module):
def __init__(self, in_channels, out_channels, size_x, size_y, bias=True,
periodic=False):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if not periodic:
self.size_x = size_x
self.size_y = size_y
else:
self.size_x = size_x // 2
self.size_y = size_y // 2
self.weights = torch.nn.Parameter(torch.view_as_real(1 / (
in_channels * out_channels) * torch.rand(in_channels,
out_channels, self.size_x, self.size_y, dtype=torch.cfloat)))
self.biases = torch.nn.Parameter(torch.view_as_real(1 /
out_channels * torch.rand(out_channels, self.size_x, self.
size_y, dtype=torch.cfloat)))
self.bias = bias
self.periodic = periodic
def forward(self, x):
if not self.periodic:
x = torch.nn.functional.pad(x, [0, self.size_y, 0, self.size_x])
x_ft = torch.fft.rfft2(x)
out_ft = torch.zeros_like(x_ft)
out_ft[:, :, :self.size_x, :self.size_y] = torch.einsum(
'bixy,ioxy->boxy', x_ft[:, :, :self.size_x, :self.size_y],
torch.view_as_complex(self.weights))
if self.bias:
out_ft[:, :, :self.size_x, :self.size_y] += torch.view_as_complex(
self.biases)
out = torch.fft.irfft2(out_ft)
if not self.periodic:
out = out[..., :self.size_x, :self.size_y]
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'size_x': 4, 'size_y': 4}]
|
RMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/iw/ciw2tzflxj3uv7uuhphbhiz2n5c5nl3zoxfzhrpjczu2u7fh5vd2.py
# Topologically Sorted Source Nodes: [sub, tmp, loss, sqrt], Original ATen: [aten.sub, aten.pow, aten.mean, aten.sqrt]
# Source node to ATen node mapping:
# loss => mean
# sqrt => sqrt
# sub => sub
# tmp => pow_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
triton_per_fused_mean_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_mean_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, tmp, loss, sqrt], Original ATen: [aten.sub, aten.pow, aten.mean, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_mean_pow_sqrt_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class RMSELoss(nn.Module):
def __init__(self):
super(RMSELoss, self).__init__()
def forward(self, inputs, targets):
tmp = (inputs - targets) ** 2
loss = torch.mean(tmp)
return torch.sqrt(loss)
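# Hedged equivalence note (illustrative, editorial): this is sqrt(MSE), so up
# to floating-point error it matches
# torch.sqrt(nn.MSELoss()(inputs, targets)).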
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
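# Editorial note: a persistent reduction in a single program loads all 256
# element pairs, reduces sum((a - b)^2), divides by 256.0, and takes the
# square root, writing the scalar RMSE in place over the accumulator.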
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_pow_sqrt_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class RMSELossNew(nn.Module):
def __init__(self):
super(RMSELossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| kamomehz/waveletCodingCNN | RMSELoss | false | 3,794 | [
"MIT"
] | 0 | 50c7db9d986039ded38999b7e4f4265e2250fb90 | https://github.com/kamomehz/waveletCodingCNN/tree/50c7db9d986039ded38999b7e4f4265e2250fb90 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs, targets):
tmp = (inputs - targets) ** 2
loss = torch.mean(tmp)
return torch.sqrt(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Net_L2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/el/cel3ti6ei3rprs2l5m6qs62p6md67qhlcbr3oxhxsqfmherljfbo.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/b7/cb7yiqdigd2vu5it7f2y6axob3bgvkx2ecs3nmymezsrlxsu2jhl.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (256, 4), (4, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (32, 256), (256, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (2, 32), (32, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 256), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 32), (1, 256), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 128, grid=grid(128), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (32, 2), (1, 32), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (buf4, primals_1, buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net_L2(nn.Module):
def __init__(self, inputSize, kernel=64):
super(Net_L2, self).__init__()
self.inputSize = inputSize
self.kernel = kernel
self.fc1 = nn.Linear(self.inputSize, 256)
self.fc2 = nn.Linear(256, 32)
self.fc3 = nn.Linear(32, 2)
self.drop1 = nn.Dropout(0.1)
def forward(self, x):
x = torch.flatten(x, 1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'inputSize': 4}]
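# Hedged usage sketch (added; the helper name is ours, everything else is
# defined above). Net_L2 flattens the input and maps a (4, 4) batch to (4, 2):
def _example_net_l2_usage():
    model = Net_L2(inputSize=4)
    out = model(get_inputs()[0])
    assert out.shape == (4, 2)
    return out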
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
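# Hedged note (added for illustration): triton_poi_fused_relu_1 repeats the
# bias-add + ReLU pattern of triton_poi_fused_relu_0, re-sized for the
# (4, 32) fc2 output (xnumel = 128, bias indexed modulo 32).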
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (256, 4), (4, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (32, 256), (256, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (2, 32), (32, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 256),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 32), (1,
256), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(128)](buf3, primals_5, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(32, 2), (1, 32), 0), alpha=1, beta=1, out=buf4)
del primals_7
return buf4, primals_1, buf1, buf3, primals_6, primals_4
class Net_L2New(nn.Module):
def __init__(self, inputSize, kernel=64):
super(Net_L2New, self).__init__()
self.inputSize = inputSize
self.kernel = kernel
self.fc1 = nn.Linear(self.inputSize, 256)
self.fc2 = nn.Linear(256, 32)
self.fc3 = nn.Linear(32, 2)
self.drop1 = nn.Dropout(0.1)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| kamomehz/waveletCodingCNN | Net_L2 | false | 3,795 | [
"MIT"
] | 0 | 50c7db9d986039ded38999b7e4f4265e2250fb90 | https://github.com/kamomehz/waveletCodingCNN/tree/50c7db9d986039ded38999b7e4f4265e2250fb90 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, inputSize, kernel=64):
super().__init__()
self.inputSize = inputSize
self.kernel = kernel
self.fc1 = nn.Linear(self.inputSize, 256)
self.fc2 = nn.Linear(256, 32)
self.fc3 = nn.Linear(32, 2)
self.drop1 = nn.Dropout(0.1)
def forward(self, x):
x = torch.flatten(x, 1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
ToContinuous | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3b/c3b62pdsbck73tub674e2ap2qhafl7nnia7qegg6orm2txneplfz.py
# Topologically Sorted Source Nodes: [max_1, float_2, sub, x_2, mul, x_3, setitem], Original ATen: [aten.max, aten._to_copy, aten.sub, aten.div, aten.mul, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# float_2 => convert_element_type_1
# max_1 => max_1
# mul => mul
# setitem => full_default, index_put
# sub => sub
# x_2 => div
# x_3 => sub_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%getitem_1, torch.float32), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%sub_1, [%bitwise_not], %full_default), kwargs = {})
triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp32 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tmp46 > tmp10
tmp48 = tmp47 == 0
tmp49 = tmp46.to(tl.float32)
tmp50 = 1.0
tmp51 = tmp49 - tmp50
tmp52 = 0.5
tmp53 = tmp51 * tmp52
tmp54 = 2.0
tmp55 = tmp53 * tmp54
tmp56 = tmp55 - tmp50
tmp57 = -1.0
tmp58 = tl.where(tmp48, tmp57, tmp56)
tl.store(out_ptr0 + (x2), tmp46, xmask)
tl.store(out_ptr1 + (x2), tmp58, xmask)
''', device_str='cuda')
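# Hedged eager-mode equivalent of the fused kernel above (illustrative only;
# shapes taken from the (4, 4, 4, 4) input with C == 4 bins):
#   idx = x.view(-1, 4, 4, 4).max(dim=1).indices   # argmax over the bins axis
#   out = 2.0 * ((idx.float() - 1.0) / 2.0) - 1.0  # rescale indices to [-1, 1]
#   out[idx == 0] = -1.0                           # bin 0 is background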
# kernel path: runs/run_shard_7/inductor_cache/j5/cj5xlqb2izp3v4stsdw4qkcghuulalwjuyl2kw4xl2gdfve2bely.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_4 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x2 = (xindex // 32)
x0 = xindex % 16
x3 = xindex
tmp0 = x2 + (4*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*(x2 + (4*x1)))), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x2 + (4*x1)))), tmp6 & xmask, other=0.0)
tmp10 = tmp9 > tmp1
tmp11 = tmp10.to(tl.float32)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, float_2, sub, x_2, mul, x_3, setitem], Original ATen: [aten.max, aten._to_copy, aten.sub, aten.div, aten.mul, aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0.run(arg0_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf0, buf2, 128, grid=grid(128), stream=stream0)
del buf0
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class ToContinuous(nn.Module):
def __init__(self):
super(ToContinuous, self).__init__()
def forward(self, x):
"""
:param x: tensor of shape (*batch_dims, bins, H, W); leading batch dims are optional, bins >= 2
:return: tensor of shape (*batch_dims, 2, H, W): a continuous value channel in [-1, 1] plus a binary silhouette channel
"""
assert len(x.shape) >= 3 and x.shape[-3] >= 2
*other_dims, C, H, W = x.shape
x = x.reshape(-1, C, H, W)
x = torch.max(x, dim=1).indices
sil = x > 0
sil_float = sil.float()
x = (x.float() - 1) / (C - 2)
x = 2 * x - 1
x[~sil] = -1
x = torch.stack((x, sil_float), dim=0).permute(1, 0, 2, 3).contiguous()
x = x.reshape(other_dims + [2, H, W])
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
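# Hedged usage sketch (added; the helper name is ours, everything else is
# defined above). The bins axis collapses to a value channel plus a silhouette:
def _example_to_continuous_usage():
    module = ToContinuous()
    y = module(get_inputs()[0])
    assert y.shape == (4, 2, 4, 4)
    return y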
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0(in_ptr0,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tmp46 > tmp10
tmp48 = tmp47 == 0
tmp49 = tmp46.to(tl.float32)
tmp50 = 1.0
tmp51 = tmp49 - tmp50
tmp52 = 0.5
tmp53 = tmp51 * tmp52
tmp54 = 2.0
tmp55 = tmp53 * tmp54
tmp56 = tmp55 - tmp50
tmp57 = -1.0
tmp58 = tl.where(tmp48, tmp57, tmp56)
tl.store(out_ptr0 + x2, tmp46, xmask)
tl.store(out_ptr1 + x2, tmp58, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x2 = xindex // 32
x0 = xindex % 16
x3 = xindex
tmp0 = x2 + 4 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * (x2 + 4 * x1)), tmp4 & xmask, other=0.0
)
tmp6 = tmp0 >= tmp3
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x2 + 4 * x1)), tmp6 & xmask,
other=0.0)
tmp10 = tmp9 > tmp1
tmp11 = tmp10.to(tl.float32)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x3, tmp14, xmask)
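# Hedged note (added for illustration): the clone kernel above interleaves the
# rescaled values (in_ptr0) with a silhouette mask recomputed from the argmax
# indices (in_ptr1 > 0), mirroring the eager torch.stack(...).permute(...) step.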
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_div_index_put_lift_fresh_max_mul_sub_0[grid
(64)](arg0_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(128)](buf1, buf0, buf2, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del buf0
del buf1
return buf2,
class ToContinuousNew(nn.Module):
def __init__(self):
super(ToContinuousNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| kampta/multiview-shapes | ToContinuous | false | 3,796 | [
"MIT"
] | 0 | a79eb4b492be8c2c279e2c69b13d5a19dff1621b | https://github.com/kampta/multiview-shapes/tree/a79eb4b492be8c2c279e2c69b13d5a19dff1621b | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
"""
:param x: tensor of shape (*batch_dims, bins, H, W); leading batch dims are optional, bins >= 2
:return: tensor of shape (*batch_dims, 2, H, W): a continuous value channel in [-1, 1] plus a binary silhouette channel
"""
assert len(x.shape) >= 3 and x.shape[-3] >= 2
*other_dims, C, H, W = x.shape
x = x.reshape(-1, C, H, W)
x = torch.max(x, dim=1).indices
sil = x > 0
sil_float = sil.float()
x = (x.float() - 1) / (C - 2)
x = 2 * x - 1
x[~sil] = -1
x = torch.stack((x, sil_float), dim=0).permute(1, 0, 2, 3).contiguous()
x = x.reshape(other_dims + [2, H, W])
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DGMNConv3DLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/um/cum3blwu2htpx2crif32ul5hlhg3epf5grgotwowscrmmijdon4y.py
# Topologically Sorted Source Nodes: [conv3d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv3d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[33554432],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 33554432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 262144) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
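# Hedged note (added for illustration): this kernel fuses the conv3d bias-add
# and ReLU in place over the 32-channel feature volume; eager PyTorch would
# write it as torch.relu(conv_out + bias.view(1, 32, 1, 1, 1)).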
# kernel path: runs/run_shard_7/inductor_cache/kh/ckhrlrt6uve2mgqip7qkizglf6pmn5pmkpbubgufifrxmsgrbxyo.py
# Topologically Sorted Source Nodes: [conv3d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv3d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 620928
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 9702) % 16
x0 = xindex % 9702
x4 = (xindex // 9702)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (9728*x4)), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3, 3), (27, 27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3, 3), (864, 27, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64, 64), (8388608, 262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv3d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 33554432, grid=grid(33554432), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.max_pool3d_with_indices]
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [3, 3, 3], [3, 3, 3], [1, 0, 0])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
# Topologically Sorted Source Nodes: [conv3d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 22, 21, 21), (155232, 9702, 441, 21, 1))
buf6 = empty_strided_cuda((4, 16, 22, 21, 21), (155648, 9728, 441, 21, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv3d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf5, primals_5, buf6, 620928, grid=grid(620928), stream=stream0)
del buf5
del primals_5
# Topologically Sorted Source Nodes: [outputs_1], Original ATen: [aten.max_pool3d_with_indices]
buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [3, 3, 3], [3, 3, 3], [1, 0, 0])
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
return (reinterpret_tensor(buf8, (4, 6272), (6272, 1), 0), primals_1, primals_3, primals_4, buf1, buf3, buf4, buf6, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 3, 3, 3), (27, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 32, 3, 3, 3), (864, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.init as init
class DGMNConv3DLayer(nn.Module):
def __init__(self, args):
self.args = args
super(DGMNConv3DLayer, self).__init__()
self.conv1 = nn.Conv3d(in_channels=1, out_channels=32, kernel_size=
(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3),
padding=(1, 0, 0))
self.conv2 = nn.Conv3d(in_channels=32, out_channels=16, kernel_size
=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3),
padding=(1, 0, 0))
self.flatten = nn.Flatten()
self.init_weight()
def init_weight(self):
init.xavier_uniform_(self.conv1.weight)
init.constant_(self.conv1.bias, 0.0)
init.xavier_uniform_(self.conv2.weight)
init.constant_(self.conv2.bias, 0.0)
def forward(self, cube):
outputs = self.pool1(torch.relu(self.conv1(cube)))
outputs = self.pool2(torch.relu(self.conv2(outputs)))
outputs = self.flatten(outputs)
return outputs
def get_inputs():
return [torch.rand([4, 1, 64, 64, 64])]
def get_init_inputs():
return [[], {'args': _mock_config()}]
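# Hedged usage sketch (added; the helper name is ours, shapes follow the
# asserts in the compiled call). Two conv3d + pool3d stages shrink
# (1, 64, 64, 64) to (16, 8, 7, 7), which flattens to 6272 features:
def _example_dgmn_usage():
    layer = DGMNConv3DLayer(_mock_config())
    feats = layer(get_inputs()[0])
    assert feats.shape == (4, 6272)
    return feats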
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
x3 = xindex
x1 = xindex // 262144 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 620928
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9702 % 16
x0 = xindex % 9702
x4 = xindex // 9702
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 9728 * x4), tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3, 3), (27, 27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1))
assert_size_stride(primals_4, (16, 32, 3, 3, 3), (864, 27, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64, 64), (8388608, 262144,
4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(33554432)](buf1, primals_2,
33554432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [3, 3,
3], [3, 3, 3], [1, 0, 0])
buf3 = buf2[0]
buf4 = buf2[1]
del buf2
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 22, 21, 21), (155232, 9702, 441,
21, 1))
buf6 = empty_strided_cuda((4, 16, 22, 21, 21), (155648, 9728, 441,
21, 1), torch.float32)
triton_poi_fused_convolution_relu_1[grid(620928)](buf5, primals_5,
buf6, 620928, XBLOCK=512, num_warps=8, num_stages=1)
del buf5
del primals_5
buf7 = torch.ops.aten.max_pool3d_with_indices.default(buf6, [3, 3,
3], [3, 3, 3], [1, 0, 0])
buf8 = buf7[0]
buf9 = buf7[1]
del buf7
return reinterpret_tensor(buf8, (4, 6272), (6272, 1), 0
), primals_1, primals_3, primals_4, buf1, buf3, buf4, buf6, buf9
class DGMNConv3DLayerNew(nn.Module):
def __init__(self, args):
self.args = args
super(DGMNConv3DLayerNew, self).__init__()
self.conv1 = nn.Conv3d(in_channels=1, out_channels=32, kernel_size=
(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3),
padding=(1, 0, 0))
self.conv2 = nn.Conv3d(in_channels=32, out_channels=16, kernel_size
=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3),
padding=(1, 0, 0))
self.flatten = nn.Flatten()
self.init_weight()
def init_weight(self):
init.xavier_uniform_(self.conv1.weight)
init.constant_(self.conv1.bias, 0.0)
init.xavier_uniform_(self.conv2.weight)
init.constant_(self.conv2.bias, 0.0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Coldog2333/DGMN-pytorch | DGMNConv3DLayer | false | 3,797 | [
"Apache-2.0"
] | 0 | c34248afca516625c2ac2fc6d6f4ce8fe2988c99 | https://github.com/Coldog2333/DGMN-pytorch/tree/c34248afca516625c2ac2fc6d6f4ce8fe2988c99 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.init as init
class Model(nn.Module):
def __init__(self, args):
self.args = args
super().__init__()
self.conv1 = nn.Conv3d(in_channels=1, out_channels=32, kernel_size=
(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3),
padding=(1, 0, 0))
self.conv2 = nn.Conv3d(in_channels=32, out_channels=16, kernel_size
=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(3, 3, 3),
padding=(1, 0, 0))
self.flatten = nn.Flatten()
self.init_weight()
def init_weight(self):
init.xavier_uniform_(self.conv1.weight)
init.constant_(self.conv1.bias, 0.0)
init.xavier_uniform_(self.conv2.weight)
init.constant_(self.conv2.bias, 0.0)
def forward(self, cube):
outputs = self.pool1(torch.relu(self.conv1(cube)))
outputs = self.pool2(torch.relu(self.conv2(outputs)))
outputs = self.flatten(outputs)
return outputs
def get_inputs():
return [torch.rand([4, 1, 64, 64, 64])]
def get_init_inputs():
return []
|
teacherNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/km/ckm7e2xx3g4foqtxyzhywto7foc63q7nowhdvpkow4bywiojncmn.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1200
x1 = (xindex // 1200)
tmp0 = tl.load(in_out_ptr0 + (x0 + (1216*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0 + (1216*x1)), tmp4, xmask)
''', device_str='cuda')
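# Hedged note (added for illustration): the kernel indexes rows with a leading
# stride of 1216 rather than 1200 because the buffer is allocated with padded
# strides (1216, 1), presumably for alignment; the computation itself is still
# a plain per-element bias-add + ReLU.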
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (1200, 784), (784, 1))
assert_size_stride(primals_3, (1200, ), (1, ))
assert_size_stride(primals_4, (1200, 1200), (1200, 1))
assert_size_stride(primals_5, (1200, ), (1, ))
assert_size_stride(primals_6, (10, 1200), (1200, 1))
assert_size_stride(primals_7, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1200), (1216, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 1200), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 4800, grid=grid(4800), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 1200), (1216, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1200, 1200), (1, 1200), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 4800, grid=grid(4800), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (1200, 10), (1, 1200), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (buf4, primals_1, buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1200, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1200, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1200, 1200), (1200, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1200, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 1200), (1200, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class teacherNet(nn.Module):
def __init__(self):
super(teacherNet, self).__init__()
self.fc1 = nn.Linear(28 * 28, 1200)
self.fc2 = nn.Linear(1200, 1200)
self.fc3 = nn.Linear(1200, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.8, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=0.8, training=self.training)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
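# Hedged usage sketch (added; the helper name is ours). teacherNet is an
# MNIST-sized MLP head mapping (4, 784) inputs to (4, 10) logits:
def _example_teacher_usage():
    net = teacherNet()
    logits = net(get_inputs()[0])
    assert logits.shape == (4, 10)
    return logits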
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1200
x1 = xindex // 1200
tmp0 = tl.load(in_out_ptr0 + (x0 + 1216 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0 + 1216 * x1), tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (1200, 784), (784, 1))
assert_size_stride(primals_3, (1200,), (1,))
assert_size_stride(primals_4, (1200, 1200), (1200, 1))
assert_size_stride(primals_5, (1200,), (1,))
assert_size_stride(primals_6, (10, 1200), (1200, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1200), (1216, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
1200), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(4800)](buf1, primals_3, 4800, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 1200), (1216, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1200, 1200),
(1, 1200), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(4800)](buf3, primals_5, 4800, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(1200, 10), (1, 1200), 0), alpha=1, beta=1, out=buf4)
del primals_7
return buf4, primals_1, buf1, buf3, primals_6, primals_4
class teacherNetNew(nn.Module):
def __init__(self):
super(teacherNetNew, self).__init__()
self.fc1 = nn.Linear(28 * 28, 1200)
self.fc2 = nn.Linear(1200, 1200)
self.fc3 = nn.Linear(1200, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| kamiyakenta/knowledge-distillation-pytorch | teacherNet | false | 3,798 | [
"MIT"
] | 0 | 749c6bb353961147718371b2b694046af0a6e3f1 | https://github.com/kamiyakenta/knowledge-distillation-pytorch/tree/749c6bb353961147718371b2b694046af0a6e3f1 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(28 * 28, 1200)
self.fc2 = nn.Linear(1200, 1200)
self.fc3 = nn.Linear(1200, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.8, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=0.8, training=self.training)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return []
|
ToRGB | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vr/cvrpotvk7sdeldoy74wgzn6gs5gaq4ppvzjlzobaslrmxhyft34w.py
# Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_2 => mul_2
# weight => mul_3
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
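# A hedged eager-mode reference (a sketch, not compiler output) for
# triton_poi_fused_mul_2: it realizes `weight = (self.scale * self.weight) *
# style` from ModulatedConv2d, where scale = 0.5 because fan_in = 4 and
# kernel_size = 1 give 1 / sqrt(4). `w` and `style` stand in for primals_5
# and buf2.
def _ref_modulated_weight(w, style):
    # Broadcast the 4 scaled weight taps across the 16 style elements,
    # matching the kernel's x0 = xindex % 4 indexing.
    return (w * 0.5).flatten().repeat(style.numel() // w.numel()) * style.flatten()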
# kernel path: runs/run_shard_7/inductor_cache/63/c63qmle3vu5ahnnf2fovdaaeaihgig2xncwyc2hocygztkcg2qst.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out_3 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_6), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
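# A hedged eager-mode reference for triton_poi_fused_add_3: it adds the
# single learned bias element (primals_6 has shape (1, 1, 1, 1)) to every
# value of the convolution output, in place.
def _ref_add_bias(x, bias):
    # bias.reshape(()) turns the 1-element tensor into a scalar for broadcast
    return x + bias.reshape(())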
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 1, 4, 1, 1), (4, 4, 1, 1, 1))
assert_size_stride(primals_6, (1, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm]
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = reinterpret_tensor(buf0, (4, 1, 4, 1, 1), (4, 1, 1, 4, 4), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(primals_5, buf2, buf3, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf4, (1, 4, 4, 4), (64, 16, 4, 1))
buf5 = reinterpret_tensor(buf4, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf5, primals_6, 64, grid=grid(64), stream=stream0)
del primals_6
return (buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 1, 4, 1, 1), (4, 4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.utils.data
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
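# For example, make_kernel([1, 3, 3, 1]) takes the outer product of the 1-D
# tap vector with itself and normalizes by its sum (8 * 8 = 64), producing
# the 4x4 separable binomial blur kernel used by Blur and Upsample below.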
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0],
pad[1], pad[0], pad[1]))
return out
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
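# NOTE: `upfirdn2d_op` and `fused`, referenced by the Function classes below,
# are compiled CUDA extensions (StyleGAN2's upfirdn2d / fused_bias_act ops),
# typically built with torch.utils.cpp_extension.load(); they are assumed to
# already be in scope in the original repository.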
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = pad0, pad1
def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1,
            pad=self.pad)
return out
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
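# Equalized learning rate: the weight is stored divided by lr_mul and
# rescaled at runtime by scale = lr_mul / sqrt(in_dim), so the effective
# weight follows a 1/sqrt(fan_in) init while gradient magnitudes scale with
# lr_mul. Hedged check: EqualLinear(512, 512).scale == 1 / math.sqrt(512).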
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1),
                upsample_factor=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
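# The per-sample styled weights are applied in one call by folding the batch
# into the channel dimension: input is viewed as (1, batch * in_channel, H,
# W), weight as (batch * out_channel, in_channel, k, k), and
# F.conv2d(..., groups=batch) convolves each sample with its own kernel.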
class ToRGB(nn.Module):
    def __init__(self, in_channel, style_dim, output_channels=1,
        upsample=True, blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, output_channels, 1,
style_dim, demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, output_channels, 1, 1))
def forward(self, input, style, skip=None):
out = self.conv(input, style)
out = out + self.bias
if skip is not None:
skip = self.upsample(skip)
out = out + skip
return out
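# Hedged usage sketch (shapes are assumptions matching get_inputs below):
#   to_rgb = ToRGB(in_channel=4, style_dim=4, upsample=False)
#   out = to_rgb(torch.rand(4, 4, 4, 4), torch.rand(4, 4))  # -> (4, 1, 4, 4)
# With demodulate=False, no skip branch, and a 1x1 kernel this path runs in
# pure PyTorch (no compiled extensions are hit).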
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'style_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import math
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 1, 4, 1, 1), (4, 4, 1, 1, 1))
assert_size_stride(primals_6, (1, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = reinterpret_tensor(buf0, (4, 1, 4, 1, 1), (4, 1, 1, 4, 4), 0)
del buf0
triton_poi_fused_mul_2[grid(16)](primals_5, buf2, buf3, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (4, 4,
1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf4, (1, 4, 4, 4), (64, 16, 4, 1))
buf5 = reinterpret_tensor(buf4, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf4
triton_poi_fused_add_3[grid(64)](buf5, primals_6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_6
return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (4, 4,
1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4
), (256, 16, 4, 1), 0)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0],
pad[1], pad[0], pad[1]))
return out
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = pad0, pad1
def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1,
            pad=self.pad)
return out
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1),
                upsample_factor=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class ToRGBNew(nn.Module):
    def __init__(self, in_channel, style_dim, output_channels=1,
        upsample=True, blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, output_channels, 1,
style_dim, demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, output_channels, 1, 1))
def forward(self, input_0, input_1):
primals_6 = self.bias
primals_5 = self.conv.weight
primals_2 = self.conv.modulation.weight
primals_3 = self.conv.modulation.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| kampta/multiview-shapes | ToRGB | false | 3,799 | [
"MIT"
] | 0 | a79eb4b492be8c2c279e2c69b13d5a19dff1621b | https://github.com/kampta/multiview-shapes/tree/a79eb4b492be8c2c279e2c69b13d5a19dff1621b | from torch.autograd import Function
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.utils.data
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0],
pad[1], pad[0], pad[1]))
return out
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
# ... truncated (>4000 chars) for memory efficiency |
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sr/csrxdjbtbkq5mhx4lx76hdeti625uy52jalpuc5xjwghomvl635m.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
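# A hedged eager-mode reference (a sketch, not compiler output) for the two
# relu/threshold_backward kernels: each fuses the linear layer's bias add
# with ReLU and also materializes the boolean mask (out <= 0) that ReLU's
# backward pass reuses (kernel 1 only differs in writing its mask with a
# padded stride of 2432).
def _ref_relu_with_backward_mask(x, bias):
    out = torch.relu(x + bias)  # bias broadcasts over the last dimension
    return out, out <= 0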
# kernel path: runs/run_shard_7/inductor_cache/4c/c4cmdkv2tha5eqdeihje5rklwl6lelhv4tvw7t54sepa7dox7li5.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 150
x2 = xindex % 2400
x3 = (xindex // 2400)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (2432*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
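# A hedged eager-mode reference for triton_poi_fused_tanh_2: the final
# layer's bias add fused with tanh, applied in place on the matmul output.
def _ref_bias_tanh(x, bias):
    return torch.tanh(x + bias)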
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (150, 200), (200, 1))
assert_size_stride(primals_5, (150, ), (1, ))
assert_size_stride(primals_6, (4, 150), (150, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 12800, grid=grid(12800), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 150), (150, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 150), (1, 200), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 150), (2400, 600, 150, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 150), (2432, 600, 150, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 9600, grid=grid(9600), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 150), (150, 1), 0), reinterpret_tensor(primals_6, (150, 4), (1, 150), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 200), (200, 1), 0), reinterpret_tensor(buf3, (64, 150), (150, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((200, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((150, 200), (200, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((150, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 150), (150, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
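# Worked example: for nn.Linear(4, 200) the weight has shape (200, 4), so
# size()[0] == 200 and lim = 1 / sqrt(200) ~= 0.0707. Note size()[0] is the
# layer's out_features; a strict fan-in would use size()[1] (in_features),
# but this keeps the reference implementation as written.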
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, input_size, output_size, seed, fc1_units=200,
fc2_units=150):
"""Initialize parameters and build model.
Params
======
            input_size (int): Dimension of each state
            output_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(input_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, output_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
        return torch.tanh(self.fc3(x))
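# Hedged usage sketch: the network maps states to actions squashed into
# (-1, 1) by the final tanh.
#   actor = Actor(input_size=4, output_size=4, seed=0)
#   actions = actor(torch.rand(8, 4))  # -> (8, 4)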
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'seed': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 9600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 150
x2 = xindex % 2400
x3 = xindex // 2400
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 2432 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (200, 4), (4, 1))
assert_size_stride(primals_2, (200,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (150, 200), (200, 1))
assert_size_stride(primals_5, (150,), (1,))
assert_size_stride(primals_6, (4, 150), (150, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 200), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(12800)](buf1,
primals_2, buf7, 12800, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 150), (150, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_4, (200, 150), (1, 200), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 150), (2400, 600, 150, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 150), (2432, 600, 150, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(9600)](buf3,
primals_5, buf6, 9600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 150), (150, 1), 0),
reinterpret_tensor(primals_6, (150, 4), (1, 150), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 200), (200, 1), 0
), reinterpret_tensor(buf3, (64, 150), (150, 1), 0
), buf5, primals_6, buf6, primals_4, buf7
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class ActorNew(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, input_size, output_size, seed, fc1_units=200,
fc2_units=150):
"""Initialize parameters and build model.
Params
======
            input_size (int): Dimension of each state
            output_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(ActorNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(input_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, output_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| kangjie-chen/deep-reinforcement-learning | Actor | false | 3,801 | [
"MIT"
] | 0 | 0706f136834ecafc7391f483a6b3c84365a349eb | https://github.com/kangjie-chen/deep-reinforcement-learning/tree/0706f136834ecafc7391f483a6b3c84365a349eb | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class Model(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, input_size, output_size, seed, fc1_units=200,
fc2_units=150):
"""Initialize parameters and build model.
Params
======
            input_size (int): Dimension of each state
            output_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(input_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, output_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
        return torch.tanh(self.fc3(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Feature_extraction | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7e/c7emkemmywdizbc2dk22jgylyayrytgas3pkynkv2rcugfx3fbgp.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (75*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
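# A hedged eager-mode reference (a sketch, not compiler output): this and
# the next two layout kernels move NCHW-contiguous tensors (here the
# (64, 3, 5, 5) conv weight, then the (4, 3, 64, 64) input and a
# (64, 64, 4, 4) tensor) into a channels-last arrangement, roughly:
def _ref_to_channels_last(t):
    # put the channel dimension innermost, as the stores above do
    return t.permute(0, 2, 3, 1).contiguous()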
# kernel path: runs/run_shard_7/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
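# [Editor's note] triton_poi_fused_1 applies the same repacking to the
# (4, 3, 64, 64) input activation; a hedged equivalent (an assumption):
#
#   x_cl = x.contiguous(memory_format=torch.channels_last)
#   assert x_cl.stride() == (12288, 1, 192, 3)  # matches buf1 in call()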
# kernel path: runs/run_shard_7/inductor_cache/rl/crlmif6ewhfi2tedxicij26ycx75a6dts2w4ydzxsb7j2f5eci7b.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dx/cdx5ml2qpofihmmpnvabqkpaoyptwmwdx4jtjzptieewtlhrqlmf.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gk/cgkrz64fcc2eli3eoxwa3qsa6ops2xfz7xlm6itja4z6qx7ito5i.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (2048*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vm/cvma2kvkmwm363cnxdvxqkblt4eh4rh356nszykytfjr4jagdpmq.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (2048*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2a/c2a57sa76otqfun5vyz3kyqtii73mw3oj7tgngabbgjqkrs3lo6f.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 16
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (4096*y1)), tmp0, xmask)
''', device_str='cuda')
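# [Editor's note] Kernels 2-6 above repeat the same channels-last
# repacking for the remaining conv weights; only the baked-in sizes and
# launch grids differ. triton_poi_fused_6 additionally folds
# tl.program_id(2) into yoffset, presumably because its 65536 rows can
# exceed a single CUDA grid dimension at small YBLOCK values (e.g. with
# YBLOCK = 1, 65536 programs > the 65535 gridDim.y limit). This is an
# inference from the code, not stated in the dump.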
# kernel path: runs/run_shard_7/inductor_cache/j2/cj2xupuep7d37lu4g2t2376bhdcj5sxfkgbhsgljnlms4lrlswgw.py
# Topologically Sorted Source Nodes: [out1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
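# [Editor's note] The extern convolution in `call` is launched with
# bias=None, and this kernel fuses the bias add in place afterwards. A
# hedged eager equivalent (an assumption from the graph fragment above):
#
#   import torch.nn.functional as F
#   y = F.conv2d(x, w, bias=None, stride=1, padding=2)
#   y += b.view(1, -1, 1, 1)   # what triton_poi_fused_convolution_7 adds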
# kernel path: runs/run_shard_7/inductor_cache/ke/ckek2acdmo7sv7npzozy22eoys4vm54fbzq7j5pk35kbrudjtg4c.py
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_8 = async_compile.triton('triton_poi_fused_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1218816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hx/chxpv56yvpb3zbpepjjm64yvu3olkhykpo55im2qebqeu2vqt3xy.py
# Topologically Sorted Source Nodes: [pool1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# pool1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 313600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 2240) % 35
x1 = (xindex // 64) % 35
x0 = xindex % 64
x3 = (xindex // 78400)
x6 = xindex
tmp0 = (-1) + (2*x2)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 69, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x1)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-4480) + x0 + (128*x1) + (8832*x2) + (304704*x3)), tmp10 & xmask, other=float("-inf"))
tmp12 = 2*x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4416) + x0 + (128*x1) + (8832*x2) + (304704*x3)), tmp16 & xmask, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x1)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-4352) + x0 + (128*x1) + (8832*x2) + (304704*x3)), tmp23 & xmask, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-64) + x0 + (128*x1) + (8832*x2) + (304704*x3)), tmp30 & xmask, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + (128*x1) + (8832*x2) + (304704*x3)), tmp33 & xmask, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8832*x2) + (304704*x3)), tmp36 & xmask, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x2)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (4352 + x0 + (128*x1) + (8832*x2) + (304704*x3)), tmp43 & xmask, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4416 + x0 + (128*x1) + (8832*x2) + (304704*x3)), tmp46 & xmask, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4480 + x0 + (128*x1) + (8832*x2) + (304704*x3)), tmp49 & xmask, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x6), tmp51, xmask)
tl.store(out_ptr1 + (x6), tmp76, xmask)
''', device_str='cuda')
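# [Editor's note] A hedged eager-mode equivalent of this fused pooling
# kernel (inferred from the graph fragment above): 3x3 max pooling with
# stride 2 and padding 1 that also returns indices. Note the kernel
# stores the argmax *within each 3x3 window* (0..8) as int8, whereas
# eager return_indices yields flat int64 offsets.
#
#   import torch.nn.functional as F
#   x = torch.randn(4, 64, 69, 69, device='cuda')
#   vals, idx = F.max_pool2d(x, 3, stride=2, padding=1, return_indices=True)
#   assert vals.shape == (4, 64, 35, 35)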
# kernel path: runs/run_shard_7/inductor_cache/on/con2u2mvd4g2yvx3qrsgpco7js2l5uoq2rgdfk7jy5q6mbawa7dl.py
# Topologically Sorted Source Nodes: [out3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out3 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 819200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/65/c654rapxvqkc2mftcclotifezamjrwpb2n6jhlm7ewymot7uybn5.py
# Topologically Sorted Source Nodes: [out4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out4 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_8, %primals_9, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_11 = async_compile.triton('triton_poi_fused_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1036800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jg/cjgmbw4efkju7ysizyshtj5fz2hpqbatzstxenck6kjlgl3g2dlw.py
# Topologically Sorted Source Nodes: [pool2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# pool2 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 270848
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 2944) % 23
x1 = (xindex // 128) % 23
x0 = xindex % 128
x3 = (xindex // 67712)
x6 = xindex
tmp0 = (-1) + (2*x2)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 45, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x1)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5888) + x0 + (256*x1) + (11520*x2) + (259200*x3)), tmp10 & xmask, other=float("-inf"))
tmp12 = 2*x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-5760) + x0 + (256*x1) + (11520*x2) + (259200*x3)), tmp16 & xmask, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x1)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-5632) + x0 + (256*x1) + (11520*x2) + (259200*x3)), tmp23 & xmask, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-128) + x0 + (256*x1) + (11520*x2) + (259200*x3)), tmp30 & xmask, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + (256*x1) + (11520*x2) + (259200*x3)), tmp33 & xmask, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (11520*x2) + (259200*x3)), tmp36 & xmask, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x2)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (5632 + x0 + (256*x1) + (11520*x2) + (259200*x3)), tmp43 & xmask, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (5760 + x0 + (256*x1) + (11520*x2) + (259200*x3)), tmp46 & xmask, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5888 + x0 + (256*x1) + (11520*x2) + (259200*x3)), tmp49 & xmask, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x6), tmp51, xmask)
tl.store(out_ptr1 + (x6), tmp76, xmask)
''', device_str='cuda')
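# [Editor's note] Same pooling as triton_poi_fused_max_pool2d_with_indices_9,
# here applied to the (4, 128, 45, 45) map: (45 + 2*1 - 3) // 2 + 1 = 23,
# giving the (4, 128, 23, 23) buffers allocated in call() below.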
# kernel path: runs/run_shard_7/inductor_cache/kt/cktz47jtftmjipgwbyvzq2jfwxxenj6mqul2epvhtm2wnymzintt.py
# Topologically Sorted Source Nodes: [out5], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out5 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_13 = async_compile.triton('triton_poi_fused_convolution_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 802816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wf/cwfjjyvdcfqpgdcbtukubmsn6fuqwddejp54uj54mj64vtk2ufkf.py
# Topologically Sorted Source Nodes: [out6], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out6 => convolution_5
# Graph fragment:
# %convolution_5 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_4, %primals_12, %primals_13, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_14 = async_compile.triton('triton_poi_fused_convolution_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7a/c7auudnfo3lwp7u4b3fp5oqgl6nywuwlrj5fg2uzzsnrblubrixz.py
# Topologically Sorted Source Nodes: [out7], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out7 => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_5, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_15 = async_compile.triton('triton_poi_fused_convolution_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 557568
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/y4/cy4knb4jyz74gjv3a6agnbdrsxfijprdgfp74nuiautp5zjful6j.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_6, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_16 = async_compile.triton('triton_poi_fused_convolution_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 2048], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_16(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 88
xnumel = 1089
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 22
y1 = (yindex // 22)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (22*x2) + (23958*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (1089*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
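# [Editor's note] Unlike the earlier in-place bias kernels, this one also
# transposes the final activation from the channels-last buffer produced
# by the extern conv back to contiguous NCHW for the caller. A hedged
# equivalent (an assumption):
#
#   out = (y + b.view(1, -1, 1, 1)).contiguous()  # (4, 22, 33, 33), NCHW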
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (22, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_17, (22, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 192, 25, grid=grid(192, 25), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 4096, 16, grid=grid(4096, 16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 16, grid=grid(8192, 16), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((128, 128, 4, 4), (2048, 1, 512, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 16384, 16, grid=grid(16384, 16), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_10, buf5, 32768, 16, grid=grid(32768, 16), stream=stream0)
del primals_10
buf6 = empty_strided_cuda((256, 256, 4, 4), (4096, 1, 1024, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_12, buf6, 65536, 16, grid=grid(65536, 16), stream=stream0)
del primals_12
# Topologically Sorted Source Nodes: [out1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [out1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf8, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 69, 69), (304704, 1, 4416, 64))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_8.run(buf10, primals_5, 1218816, grid=grid(1218816), stream=stream0)
del primals_5
buf11 = empty_strided_cuda((4, 64, 35, 35), (78400, 1, 2240, 64), torch.float32)
buf12 = empty_strided_cuda((4, 64, 35, 35), (78400, 1, 2240, 64), torch.int8)
# Topologically Sorted Source Nodes: [pool1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_9.run(buf10, buf11, buf12, 313600, grid=grid(313600), stream=stream0)
# Topologically Sorted Source Nodes: [out3], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 40, 40), (204800, 1, 5120, 128))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [out3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_10.run(buf14, primals_7, 819200, grid=grid(819200), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [out4], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, buf4, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 128, 45, 45), (259200, 1, 5760, 128))
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [out4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_11.run(buf16, primals_9, 1036800, grid=grid(1036800), stream=stream0)
del primals_9
buf17 = empty_strided_cuda((4, 128, 23, 23), (67712, 1, 2944, 128), torch.float32)
buf18 = empty_strided_cuda((4, 128, 23, 23), (67712, 1, 2944, 128), torch.int8)
# Topologically Sorted Source Nodes: [pool2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf16, buf17, buf18, 270848, grid=grid(270848), stream=stream0)
# Topologically Sorted Source Nodes: [out5], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf17, buf5, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 256, 28, 28), (200704, 1, 7168, 256))
buf20 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [out5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf20, primals_11, 802816, grid=grid(802816), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [out6], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, buf6, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 256, 33, 33), (278784, 1, 8448, 256))
buf22 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [out6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_14.run(buf22, primals_13, 1115136, grid=grid(1115136), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [out7], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 128, 33, 33), (139392, 1, 4224, 128))
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [out7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_15.run(buf24, primals_15, 557568, grid=grid(557568), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 22, 33, 33), (23958, 1, 726, 22))
buf26 = empty_strided_cuda((4, 22, 33, 33), (23958, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
triton_poi_fused_convolution_16.run(buf25, primals_17, buf26, 88, 1089, grid=grid(88, 1089), stream=stream0)
del buf25
del primals_17
return (buf26, buf0, buf1, buf2, buf3, buf4, buf5, buf6, primals_14, primals_16, buf8, buf10, buf11, buf12, buf14, buf16, buf17, buf18, buf20, buf22, buf24, )
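# [Editor's note] A hedged usage sketch for the generated entry point
# above: `call` consumes the 17 weight/bias/input tensors (validated by
# the assert_size_stride guards), and appears to return the forward
# output first, followed by the repacked weights and intermediate
# activations kept alive for a potential backward pass.
#
#   outs = call([primals_1, ..., primals_17])  # see benchmark_compiled_module
#   y = outs[0]                                # shape (4, 22, 33, 33)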
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((22, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((22, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
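# [Editor's note] Running this file directly benchmarks `call` on random
# strided inputs via torch._inductor's print_performance harness, with the
# times/repeat defaults from benchmark_compiled_module.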
# --- original PyTorch module (the eager source of the compiled graph above) ---
import torch
from torchvision import transforms as transforms
import torch.nn as nn
class Feature_extraction(nn.Module):
def __init__(self, k, p):
super(Feature_extraction, self).__init__()
self.conv_1 = nn.Conv2d(3, 64, kernel_size=5, padding=2)
self.conv_2 = nn.Conv2d(64, 64, kernel_size=k, padding=p)
self.conv_3 = nn.Conv2d(64, 128, kernel_size=k, padding=p)
self.conv_4 = nn.Conv2d(128, 128, kernel_size=k, padding=p)
self.conv_5 = nn.Conv2d(128, 256, kernel_size=k, padding=p)
self.conv_6 = nn.Conv2d(256, 256, kernel_size=k, padding=p)
self.conv_7 = nn.Conv2d(256, 128, 1)
self.conv_8 = nn.Conv2d(128, 22, 1)
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x):
out1 = self.conv_1(x)
out2 = self.conv_2(out1)
pool1 = self.pool(out2)
out3 = self.conv_3(pool1)
out4 = self.conv_4(out3)
pool2 = self.pool(out4)
out5 = self.conv_5(pool2)
out6 = self.conv_6(out5)
out7 = self.conv_7(out6)
out = self.conv_8(out7)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'k': 4, 'p': 4}]
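# A hedged usage sketch (not part of the original dump): run the eager
# module on the sample inputs defined above. CPU is fine here; the output
# shape follows from k=4, p=4 and the two stride-2 pools.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    model = Feature_extraction(*init_args, **init_kwargs)
    x, = get_inputs()
    with torch.no_grad():
        out = model(x)
    print(out.shape)  # expected: torch.Size([4, 22, 33, 33])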
# --- cleaned-up Triton kernels extracted from the compiled dump above ---
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torchvision import transforms as transforms
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 4096 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1218816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 313600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 2240 % 35
x1 = xindex // 64 % 35
x0 = xindex % 64
x3 = xindex // 78400
x6 = xindex
tmp0 = -1 + 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 69, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-4480 + x0 + 128 * x1 + 8832 * x2 + 304704 *
x3), tmp10 & xmask, other=float('-inf'))
tmp12 = 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4416 + x0 + 128 * x1 + 8832 * x2 + 304704 *
x3), tmp16 & xmask, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-4352 + x0 + 128 * x1 + 8832 * x2 + 304704 *
x3), tmp23 & xmask, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-64 + x0 + 128 * x1 + 8832 * x2 + 304704 *
x3), tmp30 & xmask, other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8832 * x2 + 304704 * x3),
tmp33 & xmask, other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8832 * x2 + 304704 * x3
), tmp36 & xmask, other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (4352 + x0 + 128 * x1 + 8832 * x2 + 304704 *
x3), tmp43 & xmask, other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4416 + x0 + 128 * x1 + 8832 * x2 + 304704 *
x3), tmp46 & xmask, other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (4480 + x0 + 128 * x1 + 8832 * x2 + 304704 *
x3), tmp49 & xmask, other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x6, tmp51, xmask)
tl.store(out_ptr1 + x6, tmp76, xmask)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1036800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 270848
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 2944 % 23
x1 = xindex // 128 % 23
x0 = xindex % 128
x3 = xindex // 67712
x6 = xindex
tmp0 = -1 + 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 45, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5888 + x0 + 256 * x1 + 11520 * x2 + 259200 *
x3), tmp10 & xmask, other=float('-inf'))
tmp12 = 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-5760 + x0 + 256 * x1 + 11520 * x2 + 259200 *
x3), tmp16 & xmask, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-5632 + x0 + 256 * x1 + 11520 * x2 + 259200 *
x3), tmp23 & xmask, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-128 + x0 + 256 * x1 + 11520 * x2 + 259200 *
x3), tmp30 & xmask, other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x0 + 256 * x1 + 11520 * x2 + 259200 * x3),
tmp33 & xmask, other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 11520 * x2 + 259200 *
x3), tmp36 & xmask, other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (5632 + x0 + 256 * x1 + 11520 * x2 + 259200 *
x3), tmp43 & xmask, other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (5760 + x0 + 256 * x1 + 11520 * x2 + 259200 *
x3), tmp46 & xmask, other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5888 + x0 + 256 * x1 + 11520 * x2 + 259200 *
x3), tmp49 & xmask, other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x6, tmp51, xmask)
tl.store(out_ptr1 + x6, tmp76, xmask)
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 557568
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_16(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 88
xnumel = 1089
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 22
y1 = yindex // 22
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 22 * x2 + 23958 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 1089 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (22, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_17, (22,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(192, 25)](primals_1, buf0, 192, 25, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch
.float32)
triton_poi_fused_2[grid(4096, 16)](primals_4, buf2, 4096, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 16)](primals_8, buf4, 16384, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_5[grid(32768, 16)](primals_10, buf5, 32768, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((256, 256, 4, 4), (4096, 1, 1024, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 16)](primals_12, buf6, 65536, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf7 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf8 = buf7
del buf7
triton_poi_fused_convolution_7[grid(1048576)](buf8, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf9 = extern_kernels.convolution(buf8, buf2, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 69, 69), (304704, 1, 4416, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_8[grid(1218816)](buf10, primals_5,
1218816, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf11 = empty_strided_cuda((4, 64, 35, 35), (78400, 1, 2240, 64),
torch.float32)
buf12 = empty_strided_cuda((4, 64, 35, 35), (78400, 1, 2240, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_9[grid(313600)](buf10,
buf11, buf12, 313600, XBLOCK=512, num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 40, 40), (204800, 1, 5120, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_10[grid(819200)](buf14, primals_7,
819200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf15 = extern_kernels.convolution(buf14, buf4, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 128, 45, 45), (259200, 1, 5760, 128))
buf16 = buf15
del buf15
triton_poi_fused_convolution_11[grid(1036800)](buf16, primals_9,
1036800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((4, 128, 23, 23), (67712, 1, 2944, 128),
torch.float32)
buf18 = empty_strided_cuda((4, 128, 23, 23), (67712, 1, 2944, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_12[grid(270848)](buf16,
buf17, buf18, 270848, XBLOCK=512, num_warps=8, num_stages=1)
buf19 = extern_kernels.convolution(buf17, buf5, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 256, 28, 28), (200704, 1, 7168, 256))
buf20 = buf19
del buf19
triton_poi_fused_convolution_13[grid(802816)](buf20, primals_11,
802816, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf21 = extern_kernels.convolution(buf20, buf6, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 256, 33, 33), (278784, 1, 8448, 256))
buf22 = buf21
del buf21
triton_poi_fused_convolution_14[grid(1115136)](buf22, primals_13,
1115136, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf23 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 128, 33, 33), (139392, 1, 4224, 128))
buf24 = buf23
del buf23
triton_poi_fused_convolution_15[grid(557568)](buf24, primals_15,
557568, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf25 = extern_kernels.convolution(buf24, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 22, 33, 33), (23958, 1, 726, 22))
buf26 = empty_strided_cuda((4, 22, 33, 33), (23958, 1089, 33, 1),
torch.float32)
triton_poi_fused_convolution_16[grid(88, 1089)](buf25, primals_17,
buf26, 88, 1089, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf25
del primals_17
return (buf26, buf0, buf1, buf2, buf3, buf4, buf5, buf6, primals_14,
primals_16, buf8, buf10, buf11, buf12, buf14, buf16, buf17, buf18,
buf20, buf22, buf24)
class Feature_extractionNew(nn.Module):
def __init__(self, k, p):
super(Feature_extractionNew, self).__init__()
self.conv_1 = nn.Conv2d(3, 64, kernel_size=5, padding=2)
self.conv_2 = nn.Conv2d(64, 64, kernel_size=k, padding=p)
self.conv_3 = nn.Conv2d(64, 128, kernel_size=k, padding=p)
self.conv_4 = nn.Conv2d(128, 128, kernel_size=k, padding=p)
self.conv_5 = nn.Conv2d(128, 256, kernel_size=k, padding=p)
self.conv_6 = nn.Conv2d(256, 256, kernel_size=k, padding=p)
self.conv_7 = nn.Conv2d(256, 128, 1)
self.conv_8 = nn.Conv2d(128, 22, 1)
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, input_0):
primals_1 = self.conv_1.weight
primals_2 = self.conv_1.bias
primals_4 = self.conv_2.weight
primals_5 = self.conv_2.bias
primals_6 = self.conv_3.weight
primals_7 = self.conv_3.bias
primals_8 = self.conv_4.weight
primals_9 = self.conv_4.bias
primals_10 = self.conv_5.weight
primals_11 = self.conv_5.bias
primals_12 = self.conv_6.weight
primals_13 = self.conv_6.bias
primals_14 = self.conv_7.weight
primals_15 = self.conv_7.bias
primals_16 = self.conv_8.weight
primals_17 = self.conv_8.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
| justinluyao/phd_thesis | Feature_extraction | false | 3,803 | [
"MIT"
] | 0 | 0a61f5deaac86dd34839ce24c2ad89e1411a8540 | https://github.com/justinluyao/phd_thesis/tree/0a61f5deaac86dd34839ce24c2ad89e1411a8540 | import torch
from torchvision import transforms as transforms
import torch.nn as nn
class Model(nn.Module):
def __init__(self, k, p):
super().__init__()
self.conv_1 = nn.Conv2d(3, 64, kernel_size=5, padding=2)
self.conv_2 = nn.Conv2d(64, 64, kernel_size=k, padding=p)
self.conv_3 = nn.Conv2d(64, 128, kernel_size=k, padding=p)
self.conv_4 = nn.Conv2d(128, 128, kernel_size=k, padding=p)
self.conv_5 = nn.Conv2d(128, 256, kernel_size=k, padding=p)
self.conv_6 = nn.Conv2d(256, 256, kernel_size=k, padding=p)
self.conv_7 = nn.Conv2d(256, 128, 1)
self.conv_8 = nn.Conv2d(128, 22, 1)
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x):
out1 = self.conv_1(x)
out2 = self.conv_2(out1)
pool1 = self.pool(out2)
out3 = self.conv_3(pool1)
out4 = self.conv_4(out3)
pool2 = self.pool(out4)
out5 = self.conv_5(pool2)
out6 = self.conv_6(out5)
out7 = self.conv_7(out6)
out = self.conv_8(out7)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [4, 4]
|
CmapPafHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ca/ccalm3hakjc3y6pyouzotjhedyolmiuxj66frcn7j2bllcgh7gjc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (1048576*y1)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pi/cpiqq6ydqends5jgnpbakdwghpnovztoymnjrsnfd3ycpt6b7cp7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4q/c4qmtshyh2quqp4mfbem56kakgs5tn4mkrnwi3suy4oa6joff42a.py
# Topologically Sorted Source Nodes: [conv2d, ac, conv2d_1, ap, mul, mul_1], Original ATen: [aten.convolution, aten.sigmoid, aten.tanh, aten.mul]
# Source node to ATen node mapping:
# ac => sigmoid
# ap => tanh
# conv2d => convolution
# conv2d_1 => convolution_1
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %tanh), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_tanh_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_tanh_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x2), None)
tmp4 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x2), None)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = libdevice.tanh(tmp2)
tmp10 = tmp6 * tmp9
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(in_out_ptr1 + (x2), tmp5, None)
tl.store(out_ptr0 + (x2), tmp8, None)
tl.store(out_ptr1 + (x2), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pn/cpngdclw7lzjwmfou3d3kip6qoxuvwgrgxlt5gs2kd5udjsfz5ny.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16384*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 1024, 4096, grid=grid(1024, 4096), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf0, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf6 = buf5; del buf5 # reuse
buf4 = buf3; del buf3 # reuse
buf7 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
buf10 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, ac, conv2d_1, ap, mul, mul_1], Original ATen: [aten.convolution, aten.sigmoid, aten.tanh, aten.mul]
triton_poi_fused_convolution_mul_sigmoid_tanh_2.run(buf6, buf4, primals_5, primals_3, buf0, buf7, buf10, 4194304, grid=grid(4194304), stream=stream0)
del primals_3
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 64, 64), (16384, 1, 256, 4))
buf9 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf8, primals_7, buf9, 16, 4096, grid=grid(16, 4096), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 64, 64), (16384, 1, 256, 4))
buf12 = reinterpret_tensor(buf8, (4, 4, 64, 64), (16384, 4096, 64, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf11, primals_9, buf12, 16, 4096, grid=grid(16, 4096), stream=stream0)
del buf11
del primals_9
return (buf9, buf12, buf0, buf1, buf2, primals_6, primals_8, buf4, buf6, buf7, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn
import torch.optim
class UpsampleCBR(torch.nn.Sequential):
def __init__(self, input_channels, output_channels, count=1, num_flat=0):
layers = []
for i in range(count):
if i == 0:
inch = input_channels
else:
inch = output_channels
layers += [torch.nn.ConvTranspose2d(inch, output_channels,
kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d(
output_channels), torch.nn.ReLU()]
for i in range(num_flat):
layers += [torch.nn.Conv2d(output_channels, output_channels,
kernel_size=3, stride=1, padding=1), torch.nn.
BatchNorm2d(output_channels), torch.nn.ReLU()]
super(UpsampleCBR, self).__init__(*layers)
class CmapPafHeadAttention(torch.nn.Module):
def __init__(self, input_channels, cmap_channels, paf_channels,
upsample_channels=256, num_upsample=0, num_flat=0):
super(CmapPafHeadAttention, self).__init__()
self.cmap_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.paf_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.cmap_att = torch.nn.Conv2d(upsample_channels,
upsample_channels, kernel_size=3, stride=1, padding=1)
self.paf_att = torch.nn.Conv2d(upsample_channels, upsample_channels,
kernel_size=3, stride=1, padding=1)
self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels,
kernel_size=1, stride=1, padding=0)
self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, x):
xc = self.cmap_up(x)
ac = torch.sigmoid(self.cmap_att(xc))
xp = self.paf_up(x)
ap = torch.tanh(self.paf_att(xp))
return self.cmap_conv(xc * ac), self.paf_conv(xp * ap)
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_tanh_2(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x2, None)
tmp4 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x2, None)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = libdevice.tanh(tmp2)
tmp10 = tmp6 * tmp9
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(in_out_ptr1 + x2, tmp5, None)
tl.store(out_ptr0 + x2, tmp8, None)
tl.store(out_ptr1 + x2, tmp10, None)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1))
assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 4096)](primals_1, buf0, 1024, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_1[grid(65536, 9)](primals_2, buf1, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_1[grid(65536, 9)](primals_4, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf5 = extern_kernels.convolution(buf0, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf6 = buf5
del buf5
buf4 = buf3
del buf3
buf7 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256
), torch.float32)
buf10 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384,
256), torch.float32)
triton_poi_fused_convolution_mul_sigmoid_tanh_2[grid(4194304)](buf6,
buf4, primals_5, primals_3, buf0, buf7, buf10, 4194304, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_3
del primals_5
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 64, 64), (16384, 1, 256, 4))
buf9 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_3[grid(16, 4096)](buf8, primals_7,
buf9, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1)
del primals_7
buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 64, 64), (16384, 1, 256, 4))
buf12 = reinterpret_tensor(buf8, (4, 4, 64, 64), (16384, 4096, 64,
1), 0)
del buf8
triton_poi_fused_convolution_3[grid(16, 4096)](buf11, primals_9,
buf12, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1)
del buf11
del primals_9
return (buf9, buf12, buf0, buf1, buf2, primals_6, primals_8, buf4, buf6,
buf7, buf10)
class UpsampleCBR(torch.nn.Sequential):
def __init__(self, input_channels, output_channels, count=1, num_flat=0):
layers = []
for i in range(count):
if i == 0:
inch = input_channels
else:
inch = output_channels
layers += [torch.nn.ConvTranspose2d(inch, output_channels,
kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d(
output_channels), torch.nn.ReLU()]
for i in range(num_flat):
layers += [torch.nn.Conv2d(output_channels, output_channels,
kernel_size=3, stride=1, padding=1), torch.nn.
BatchNorm2d(output_channels), torch.nn.ReLU()]
super(UpsampleCBR, self).__init__(*layers)
class CmapPafHeadAttentionNew(torch.nn.Module):
def __init__(self, input_channels, cmap_channels, paf_channels,
upsample_channels=256, num_upsample=0, num_flat=0):
super(CmapPafHeadAttentionNew, self).__init__()
self.cmap_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.paf_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.cmap_att = torch.nn.Conv2d(upsample_channels,
upsample_channels, kernel_size=3, stride=1, padding=1)
self.paf_att = torch.nn.Conv2d(upsample_channels, upsample_channels,
kernel_size=3, stride=1, padding=1)
self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels,
kernel_size=1, stride=1, padding=0)
self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, input_0):
primals_2 = self.cmap_att.weight
primals_3 = self.cmap_att.bias
primals_4 = self.paf_att.weight
primals_5 = self.paf_att.bias
primals_6 = self.cmap_conv.weight
primals_7 = self.cmap_conv.bias
primals_8 = self.paf_conv.weight
primals_9 = self.paf_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| intflow/trt_openpose | CmapPafHeadAttention | false | 3,805 | [
"MIT"
] | 0 | 526b1b0d463f1c86a45ca4d4cd77a41732c7654b | https://github.com/intflow/trt_openpose/tree/526b1b0d463f1c86a45ca4d4cd77a41732c7654b | import torch
import torch.utils.data
import torch.nn
import torch.optim
class UpsampleCBR(torch.nn.Sequential):
def __init__(self, input_channels, output_channels, count=1, num_flat=0):
layers = []
for i in range(count):
if i == 0:
inch = input_channels
else:
inch = output_channels
layers += [torch.nn.ConvTranspose2d(inch, output_channels,
kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d(
output_channels), torch.nn.ReLU()]
for i in range(num_flat):
layers += [torch.nn.Conv2d(output_channels, output_channels,
kernel_size=3, stride=1, padding=1), torch.nn.
BatchNorm2d(output_channels), torch.nn.ReLU()]
super().__init__(*layers)
class Model(torch.nn.Module):
def __init__(self, input_channels, cmap_channels, paf_channels,
upsample_channels=256, num_upsample=0, num_flat=0):
super().__init__()
self.cmap_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.paf_up = UpsampleCBR(input_channels, upsample_channels,
num_upsample, num_flat)
self.cmap_att = torch.nn.Conv2d(upsample_channels,
upsample_channels, kernel_size=3, stride=1, padding=1)
self.paf_att = torch.nn.Conv2d(upsample_channels, upsample_channels,
kernel_size=3, stride=1, padding=1)
self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels,
kernel_size=1, stride=1, padding=0)
self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels,
kernel_size=1, stride=1, padding=0)
def forward(self, x):
xc = self.cmap_up(x)
ac = torch.sigmoid(self.cmap_att(xc))
xp = self.paf_up(x)
ap = torch.tanh(self.paf_att(xp))
return self.cmap_conv(xc * ac), self.paf_conv(xp * ap)
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [4, 4, 4]
|
KLNormal | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sa/csabhmg42rapbjnmneye43x4d3qgef5bza6vhg2qxtde6ctfm2hr.py
# Topologically Sorted Source Nodes: [log, log_1, sub, truediv, add, sub_1, pow_1, truediv_1, add_1, sub_2, element_wise, kl], Original ATen: [aten.log, aten.sub, aten.div, aten.add, aten.pow, aten.mul, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# element_wise => mul
# kl => sum_1
# log => log
# log_1 => log_1
# pow_1 => pow_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %div), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg3_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_1, %arg0_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %div_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
triton_poi_fused_add_div_log_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_add_div_log_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log_mul_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_log_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tmp2 / tmp0
tmp6 = tmp4 + tmp5
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp10 / tmp0
tmp12 = tmp6 + tmp11
tmp13 = 1.0
tmp14 = tmp12 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp18 = tl_math.log(tmp17)
tmp20 = tl_math.log(tmp19)
tmp21 = tmp18 - tmp20
tmp22 = tmp19 / tmp17
tmp23 = tmp21 + tmp22
tmp26 = tmp24 - tmp25
tmp27 = tmp26 * tmp26
tmp28 = tmp27 / tmp17
tmp29 = tmp23 + tmp28
tmp30 = tmp29 - tmp13
tmp31 = tmp30 * tmp15
tmp32 = tmp16 + tmp31
tmp34 = tl_math.log(tmp33)
tmp36 = tl_math.log(tmp35)
tmp37 = tmp34 - tmp36
tmp38 = tmp35 / tmp33
tmp39 = tmp37 + tmp38
tmp42 = tmp40 - tmp41
tmp43 = tmp42 * tmp42
tmp44 = tmp43 / tmp33
tmp45 = tmp39 + tmp44
tmp46 = tmp45 - tmp13
tmp47 = tmp46 * tmp15
tmp48 = tmp32 + tmp47
tmp50 = tl_math.log(tmp49)
tmp52 = tl_math.log(tmp51)
tmp53 = tmp50 - tmp52
tmp54 = tmp51 / tmp49
tmp55 = tmp53 + tmp54
tmp58 = tmp56 - tmp57
tmp59 = tmp58 * tmp58
tmp60 = tmp59 / tmp49
tmp61 = tmp55 + tmp60
tmp62 = tmp61 - tmp13
tmp63 = tmp62 * tmp15
tmp64 = tmp48 + tmp63
tl.store(out_ptr0 + (x0), tmp64, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log, log_1, sub, truediv, add, sub_1, pow_1, truediv_1, add_1, sub_2, element_wise, kl], Original ATen: [aten.log, aten.sub, aten.div, aten.add, aten.pow, aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_log_mul_pow_sub_sum_0.run(arg0_1, arg1_1, arg2_1, arg3_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class KLNormal(nn.Module):
def __init__(self):
super(KLNormal, self).__init__()
def forward(self, qm, qv, pm, pv):
element_wise = 0.5 * (torch.log(pv) - torch.log(qv) + qv / pv + (qm -
pm).pow(2) / pv - 1)
kl = element_wise.sum(-1)
return kl
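# Usage sketch (illustrative, not part of the traced module): when the two
# Gaussians coincide, every KL term vanishes:
#   m, v = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
#   assert torch.allclose(KLNormal()(m, v, m, v), torch.zeros(4, 4, 4))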
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_log_mul_pow_sub_sum_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tmp2 / tmp0
tmp6 = tmp4 + tmp5
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp10 / tmp0
tmp12 = tmp6 + tmp11
tmp13 = 1.0
tmp14 = tmp12 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp18 = tl_math.log(tmp17)
tmp20 = tl_math.log(tmp19)
tmp21 = tmp18 - tmp20
tmp22 = tmp19 / tmp17
tmp23 = tmp21 + tmp22
tmp26 = tmp24 - tmp25
tmp27 = tmp26 * tmp26
tmp28 = tmp27 / tmp17
tmp29 = tmp23 + tmp28
tmp30 = tmp29 - tmp13
tmp31 = tmp30 * tmp15
tmp32 = tmp16 + tmp31
tmp34 = tl_math.log(tmp33)
tmp36 = tl_math.log(tmp35)
tmp37 = tmp34 - tmp36
tmp38 = tmp35 / tmp33
tmp39 = tmp37 + tmp38
tmp42 = tmp40 - tmp41
tmp43 = tmp42 * tmp42
tmp44 = tmp43 / tmp33
tmp45 = tmp39 + tmp44
tmp46 = tmp45 - tmp13
tmp47 = tmp46 * tmp15
tmp48 = tmp32 + tmp47
tmp50 = tl_math.log(tmp49)
tmp52 = tl_math.log(tmp51)
tmp53 = tmp50 - tmp52
tmp54 = tmp51 / tmp49
tmp55 = tmp53 + tmp54
tmp58 = tmp56 - tmp57
tmp59 = tmp58 * tmp58
tmp60 = tmp59 / tmp49
tmp61 = tmp55 + tmp60
tmp62 = tmp61 - tmp13
tmp63 = tmp62 * tmp15
tmp64 = tmp48 + tmp63
tl.store(out_ptr0 + x0, tmp64, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_log_mul_pow_sub_sum_0[grid(64)](arg0_1,
arg1_1, arg2_1, arg3_1, buf0, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf0,
class KLNormalNew(nn.Module):
def __init__(self):
super(KLNormalNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| kayburns/craftassist | KLNormal | false | 3,806 | [
"MIT"
] | 0 | 07909493d320afc2c9ff428d0891bc3acd4dc68f | https://github.com/kayburns/craftassist/tree/07909493d320afc2c9ff428d0891bc3acd4dc68f | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, qm, qv, pm, pv):
element_wise = 0.5 * (torch.log(pv) - torch.log(qv) + qv / pv + (qm -
pm).pow(2) / pv - 1)
kl = element_wise.sum(-1)
return kl
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LabelSmoothingBCE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fv/cfvrnoxnx3hmrhzs46ipufr5zookbi4tvziof7hfy5yxhfmb3eo5.py
# Topologically Sorted Source Nodes: [eq_1, smooth_target_1, eq, smooth_target, binary_cross_entropy_with_logits], Original ATen: [aten.eq, aten.masked_fill, aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default_2, log1p, minimum, mul, neg, sub, sub_1, sub_2
# eq => eq
# eq_1 => eq_1
# smooth_target => full_default, where
# smooth_target_1 => full_default_1, where_1
# Graph fragment:
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %arg0_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %where), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %where_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_2, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0 = async_compile.triton('triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr1 + (x0), xmask)
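# tmp1..tmp6 reproduce the two masked_fill calls (with the traced
# smoothing=0.0 they are the identity: 1 -> confidence=1.0, 0 -> 0.0);
# tmp9..tmp16 evaluate BCE-with-logits in its numerically stable form,
# (1 - z) * x - min(0, x) + log1p(exp(-|x|)).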
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = tmp0 == tmp3
tmp5 = tl.where(tmp4, tmp3, tmp0)
tmp6 = tl.where(tmp2, tmp1, tmp5)
tmp7 = tmp3 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.minimum(tmp1, tmp8)
tmp11 = tl_math.abs(tmp8)
tmp12 = -tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tmp10 - tmp14
tmp16 = tmp9 - tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [eq_1, smooth_target_1, eq, smooth_target, binary_cross_entropy_with_logits], Original ATen: [aten.eq, aten.masked_fill, aten.binary_cross_entropy_with_logits]
stream0 = get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class LabelSmoothingBCE(nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothingBCE, self).__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
smooth_target = target.clone().masked_fill(target == 1, self.confidence
)
smooth_target = smooth_target.masked_fill(target == 0, self.smoothing)
return self.criterion(x, smooth_target)
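# Example (illustrative): with smoothing=0.1, hard targets are softened to
# 0.9 / 0.1 before the per-element loss is computed:
#   loss = LabelSmoothingBCE(smoothing=0.1)(logits, targets)
# The default smoothing=0.0 leaves targets unchanged.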
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp3 = 1.0
tmp4 = tmp0 == tmp3
tmp5 = tl.where(tmp4, tmp3, tmp0)
tmp6 = tl.where(tmp2, tmp1, tmp5)
tmp7 = tmp3 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.minimum(tmp1, tmp8)
tmp11 = tl_math.abs(tmp8)
tmp12 = -tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tmp10 - tmp14
tmp16 = tmp9 - tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_eq_masked_fill_0[grid(256)](
    arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class LabelSmoothingBCENew(nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothingBCENew, self).__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| kayburns/craftassist | LabelSmoothingBCE | false | 3,810 | [
"MIT"
] | 0 | 07909493d320afc2c9ff428d0891bc3acd4dc68f | https://github.com/kayburns/craftassist/tree/07909493d320afc2c9ff428d0891bc3acd4dc68f | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self, smoothing=0.0):
super().__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
smooth_target = target.clone().masked_fill(target == 1, self.confidence
)
smooth_target = smooth_target.masked_fill(target == 0, self.smoothing)
return self.criterion(x, smooth_target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
HighwayLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/i3/ci3asduovereenc5juon6pzphu6oeecdpkloo6migbh6cxdu6y2t.py
# Topologically Sorted Source Nodes: [gate, nlin, mul, sub, mul_1, res], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# gate => sigmoid
# mul => mul
# mul_1 => mul_1
# nlin => tanh
# res => add
# sub => sub
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp7 = tl.load(in_ptr2 + (x0), xmask)
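# Fused highway combination: with g = sigmoid(gate_proj(x)) in tmp1, the
# output is g * tanh(nlin_proj(x)) + (1 - g) * x.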
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate, nlin, mul, sub, mul_1, res], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.rsub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf0, buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_3, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
def my_xavier_init(m, gain=1):
for p in m.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain)
else:
nn.init.constant_(p, 0)
class HighwayLayer(torch.nn.Module):
def __init__(self, dim):
super(HighwayLayer, self).__init__()
self.gate_proj = nn.Linear(dim, dim, bias=True)
self.nlin_proj = nn.Linear(dim, dim, bias=True)
my_xavier_init(self.nlin_proj)
my_xavier_init(self.gate_proj)
nn.init.constant_(self.gate_proj.bias, -1)
def forward(self, x):
gate = torch.sigmoid(self.gate_proj(x))
nlin = torch.tanh(self.nlin_proj(x))
res = gate * nlin + (1 - gate) * x
return res
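# Note: the gate bias is initialized to -1, so sigmoid starts near 0.27 and
# the layer initially favors the carry path (1 - gate) * x.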
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0, buf1,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_3, buf0, buf1
def my_xavier_init(m, gain=1):
for p in m.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain)
else:
nn.init.constant_(p, 0)
class HighwayLayerNew(torch.nn.Module):
def __init__(self, dim):
super(HighwayLayerNew, self).__init__()
self.gate_proj = nn.Linear(dim, dim, bias=True)
self.nlin_proj = nn.Linear(dim, dim, bias=True)
my_xavier_init(self.nlin_proj)
my_xavier_init(self.gate_proj)
nn.init.constant_(self.gate_proj.bias, -1)
def forward(self, input_0):
primals_1 = self.gate_proj.weight
primals_2 = self.gate_proj.bias
primals_4 = self.nlin_proj.weight
primals_5 = self.nlin_proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| kayburns/craftassist | HighwayLayer | false | 3,811 | [
"MIT"
] | 0 | 07909493d320afc2c9ff428d0891bc3acd4dc68f | https://github.com/kayburns/craftassist/tree/07909493d320afc2c9ff428d0891bc3acd4dc68f | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
def my_xavier_init(m, gain=1):
for p in m.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain)
else:
nn.init.constant_(p, 0)
class Model(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.gate_proj = nn.Linear(dim, dim, bias=True)
self.nlin_proj = nn.Linear(dim, dim, bias=True)
my_xavier_init(self.nlin_proj)
my_xavier_init(self.gate_proj)
nn.init.constant_(self.gate_proj.bias, -1)
def forward(self, x):
gate = torch.sigmoid(self.gate_proj(x))
nlin = torch.tanh(self.nlin_proj(x))
res = gate * nlin + (1 - gate) * x
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
HighwayNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ki/cki5tciw2mqxckl6wp54w7myqiup2tteoeveayxtieh64bftxlft.py
# Topologically Sorted Source Nodes: [sub, gate, nonlin, mul, sub_1, mul_1, res], Original ATen: [aten.sub, aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# gate => sigmoid
# mul => mul
# mul_1 => mul_1
# nonlin => relu
# res => add
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, 2), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sub,), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %relu), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp10 = tl.load(in_ptr2 + (x0), xmask)
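# Fused highway step: g = sigmoid(gate_logits - 2); output is
# g * relu(nonlin) + (1 - g) * lin, with in_ptr0/in_ptr1/in_ptr2 holding the
# gate, nonlinear, and linear projections respectively.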
tmp1 = 2.0
tmp2 = tmp0 - tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp3 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp3
tmp11 = tmp9 * tmp10
tmp12 = tmp7 + tmp11
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [lin], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, gate, nonlin, mul, sub_1, mul_1, res], Original ATen: [aten.sub, aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0.run(buf0, buf2, buf1, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class HighwayNetwork(nn.Module):
def __init__(self, in_dim, out_dim):
super(HighwayNetwork, self).__init__()
self.gate_proj = nn.Linear(in_dim, out_dim)
self.lin_proj = nn.Linear(in_dim, out_dim)
self.nonlin_proj = nn.Linear(in_dim, out_dim)
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_normal_(p)
else:
torch.nn.init.constant_(p, 0)
def forward(self, x):
gate = torch.sigmoid(self.gate_proj(x) - 2)
lin = self.lin_proj(x)
nonlin = torch.relu(self.nonlin_proj(x))
res = gate * nonlin + (1 - gate) * lin
return res
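# Usage sketch (illustrative): out = HighwayNetwork(4, 4)(torch.rand(4, 4, 4, 4))
# The "- 2" shift keeps the gate near sigmoid(-2) ~= 0.12 at init, so the
# network starts close to its plain linear projection.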
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp10 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 - tmp1
tmp3 = tl.sigmoid(tmp2)
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp3 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp3
tmp11 = tmp9 * tmp10
tmp12 = tmp7 + tmp11
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_sub_0[grid(256)](buf0,
buf2, buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, buf1, buf2
class HighwayNetworkNew(nn.Module):
def __init__(self, in_dim, out_dim):
super(HighwayNetworkNew, self).__init__()
self.gate_proj = nn.Linear(in_dim, out_dim)
self.lin_proj = nn.Linear(in_dim, out_dim)
self.nonlin_proj = nn.Linear(in_dim, out_dim)
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_normal_(p)
else:
torch.nn.init.constant_(p, 0)
def forward(self, input_0):
primals_1 = self.gate_proj.weight
primals_2 = self.gate_proj.bias
primals_4 = self.lin_proj.weight
primals_5 = self.lin_proj.bias
primals_6 = self.nonlin_proj.weight
primals_7 = self.nonlin_proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| kayburns/craftassist | HighwayNetwork | false | 3,813 | [
"MIT"
] | 0 | 07909493d320afc2c9ff428d0891bc3acd4dc68f | https://github.com/kayburns/craftassist/tree/07909493d320afc2c9ff428d0891bc3acd4dc68f | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.gate_proj = nn.Linear(in_dim, out_dim)
self.lin_proj = nn.Linear(in_dim, out_dim)
self.nonlin_proj = nn.Linear(in_dim, out_dim)
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_normal_(p)
else:
torch.nn.init.constant_(p, 0)
def forward(self, x):
gate = torch.sigmoid(self.gate_proj(x) - 2)
lin = self.lin_proj(x)
nonlin = torch.relu(self.nonlin_proj(x))
res = gate * nonlin + (1 - gate) * lin
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SoftmaxRegression | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [probas], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# probas => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
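# softmax runs over dim=1 of the (4, 4, 4, 4) logits: with strides
# (64, 16, 4, 1), x0 picks the position inside a 16-element plane and x2 the
# batch, so the four strided loads below gather that position's 4 class
# logits for the max-subtract-exp pass.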
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/v4/cv4nyn2kde7dd2c53ddahw4vtxyldln6pqt62jrliqindkf3sj5m.py
# Topologically Sorted Source Nodes: [probas], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# probas => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
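# Second softmax pass: re-gathers the four exponentiated logits along dim=1,
# sums them, and normalizes.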
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [probas], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [probas], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
class SoftmaxRegression(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(SoftmaxRegression, self).__init__()
self.linear = torch.nn.Linear(num_features, num_classes)
def forward(self, x):
logits = self.linear(x)
probas = F.softmax(logits, dim=1)
return logits, probas
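# Usage sketch (illustrative): probas sums to 1 along the class dim:
#   logits, probas = SoftmaxRegression(4, 4)(torch.rand(4, 4, 4, 4))
#   assert torch.allclose(probas.sum(dim=1), torch.ones(4, 4, 4))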
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4, 'num_classes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class SoftmaxRegressionNew(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(SoftmaxRegressionNew, self).__init__()
self.linear = torch.nn.Linear(num_features, num_classes)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
| kbrezinski/stat-453-deep-learning | SoftmaxRegression | false | 3,817 | [
"BSD-3-Clause"
] | 0 | b10240b5c3a970231dcea9221d3d179d26fc197d | https://github.com/kbrezinski/stat-453-deep-learning/tree/b10240b5c3a970231dcea9221d3d179d26fc197d | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
def __init__(self, num_features, num_classes):
super().__init__()
self.linear = torch.nn.Linear(num_features, num_classes)
def forward(self, x):
logits = self.linear(x)
probas = F.softmax(logits, dim=1)
return logits, probas
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
CustomizedNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zb/czbrdc6746xv7kfxrqkzgbhm74ijdfuyfd3sz3llzzwzm6wzxmfi.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
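# Reference sketch (illustrative only) of the fused kernel above: it adds
# the fc1 bias that the preceding extern mm left out and applies ReLU.
def _bias_relu_reference(x, bias):
    # x: (64, 4) matmul output; bias: (4,) broadcast over the last dim.
    return (x + bias).clamp_min(0)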
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), buf1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.distributed
class CustomizedNet(nn.Module):
def __init__(self, dropout, input_size, input_feature_num, hidden_dim,
output_size):
"""
        Use simple linear layers for multi-variate single-step forecasting.
"""
super().__init__()
self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_dim, output_size)
def forward(self, x):
x = x.view(-1, x.shape[1] * x.shape[2])
x = self.fc1(x)
x = self.dropout(x)
x = self.relu1(x)
x = self.fc2(x)
x = torch.unsqueeze(x, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dropout': 0.5, 'input_size': 4, 'input_feature_num': 4,
'hidden_dim': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1),
0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (16, 1, 4), (4, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), buf1, primals_4
class CustomizedNetNew(nn.Module):
def __init__(self, dropout, input_size, input_feature_num, hidden_dim,
output_size):
"""
        Use simple linear layers for multi-variate single-step forecasting.
"""
super().__init__()
self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_dim, output_size)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| jason-dai/BigDL | CustomizedNet | false | 3,818 | [
"Apache-2.0"
] | 0 | 81ee60a73707d91c58d9bcd5b17c8e5731741a85 | https://github.com/jason-dai/BigDL/tree/81ee60a73707d91c58d9bcd5b17c8e5731741a85 | import torch
import torch.nn as nn
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self, dropout, input_size, input_feature_num, hidden_dim,
output_size):
"""
        Use simple linear layers for multi-variate single-step forecasting.
"""
super().__init__()
self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_dim, output_size)
def forward(self, x):
x = x.view(-1, x.shape[1] * x.shape[2])
x = self.fc1(x)
x = self.dropout(x)
x = self.relu1(x)
x = self.fc2(x)
x = torch.unsqueeze(x, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dropout': 0.5, 'input_size': 4, 'input_feature_num': 4,
'hidden_dim': 4, 'output_size': 4}]
|
DQN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/j5/cj5gdxypgkf66dj2li3nqkjzopfmp3qfw4apmam5mi7ylaazpavu.py
# Topologically Sorted Source Nodes: [layer_norm, hidden], Original ATen: [aten.native_layer_norm, aten.leaky_relu]
# Source node to ATen node mapping:
# hidden => gt, mul_2, where
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_1, %mul_2), kwargs = {})
triton_per_fused_leaky_relu_native_layer_norm_0 = async_compile.triton('triton_per_fused_leaky_relu_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_leaky_relu_native_layer_norm_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_leaky_relu_native_layer_norm_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 20
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (20*x0)), rmask & xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r1), rmask, eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr2 + (r1), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 20, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 20.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.01
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(in_out_ptr1 + (r1 + (20*x0)), tmp32, rmask & xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
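# Reference sketch (illustrative only) of what the persistent-reduction
# kernel above fuses: LayerNorm over the last dimension of 20 with
# eps=1e-05, followed by LeakyReLU with negative slope 0.01.
def _layernorm_leakyrelu_reference(x, weight, bias):
    import torch.nn.functional as F
    y = F.layer_norm(x, (20,), weight, bias, eps=1e-05)
    return F.leaky_relu(y, negative_slope=0.01)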
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (20, ), (1, ))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (4, 20), (20, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.float32)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [layer_norm, hidden], Original ATen: [aten.native_layer_norm, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_per_fused_leaky_relu_native_layer_norm_0.run(buf4, buf6, buf0, primals_4, primals_5, buf1, 64, 20, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf6, (64, 20), (20, 1), 0), reinterpret_tensor(primals_6, (20, 4), (1, 20), 0), alpha=1, beta=1, out=buf7)
del primals_7
return (reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, buf4, reinterpret_tensor(buf6, (64, 20), (20, 1), 0), primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((20, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DQN(nn.Module):
def __init__(self, obs_size: 'int', num_actions: 'int', hidden_size:
'int'=20):
super(DQN, self).__init__()
self.l1 = nn.Linear(obs_size, hidden_size)
self.n1 = nn.LayerNorm(hidden_size, elementwise_affine=True)
self.l3 = nn.Linear(hidden_size, num_actions)
self.activ = torch.nn.LeakyReLU()
def forward(self, x):
hidden = self.activ(self.n1(self.l1(x)))
output = self.l3(hidden)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'obs_size': 4, 'num_actions': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_leaky_relu_native_layer_norm_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
rnumel = 20
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 20 * x0), rmask & xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last',
other=0.0)
tmp26 = tl.load(in_ptr2 + r1, rmask, eviction_policy='evict_last',
other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 20, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 20.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 * tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.01
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(in_out_ptr1 + (r1 + 20 * x0), tmp32, rmask & xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (20,), (1,))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (4, 20), (20, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.
float32)
buf6 = buf5
del buf5
get_raw_stream(0)
triton_per_fused_leaky_relu_native_layer_norm_0[grid(64)](buf4,
buf6, buf0, primals_4, primals_5, buf1, 64, 20, XBLOCK=8,
num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf6, (64, 20),
(20, 1), 0), reinterpret_tensor(primals_6, (20, 4), (1, 20), 0),
alpha=1, beta=1, out=buf7)
del primals_7
return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4,
1), 0), buf0, buf1, buf4, reinterpret_tensor(buf6, (64, 20), (20, 1), 0
), primals_6
class DQNNew(nn.Module):
def __init__(self, obs_size: 'int', num_actions: 'int', hidden_size:
'int'=20):
super(DQNNew, self).__init__()
self.l1 = nn.Linear(obs_size, hidden_size)
self.n1 = nn.LayerNorm(hidden_size, elementwise_affine=True)
self.l3 = nn.Linear(hidden_size, num_actions)
self.activ = torch.nn.LeakyReLU()
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.n1.weight
primals_5 = self.n1.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| kcorder/vcg_dqn | DQN | false | 3,819 | [
"MIT"
] | 0 | da43892f701fe88a4c751f209da2743fd824d2f5 | https://github.com/kcorder/vcg_dqn/tree/da43892f701fe88a4c751f209da2743fd824d2f5 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, obs_size: 'int', num_actions: 'int', hidden_size:
'int'=20):
super().__init__()
self.l1 = nn.Linear(obs_size, hidden_size)
self.n1 = nn.LayerNorm(hidden_size, elementwise_affine=True)
self.l3 = nn.Linear(hidden_size, num_actions)
self.activ = torch.nn.LeakyReLU()
def forward(self, x):
hidden = self.activ(self.n1(self.l1(x)))
output = self.l3(hidden)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ActorNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/2j/c2jdoj4tcaujecuntbzcpssdm46qqc55mrqjpjrmi7wwyblphesm.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
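# Note (added for clarity): besides the in-place bias-add + ReLU, the
# kernel above also writes the boolean mask relu(x) <= 0 to out_ptr0;
# inductor saves it so threshold_backward can zero gradients where the
# ReLU was inactive. A hedged eager sketch:
def _relu_with_mask_reference(x, bias):
    y = (x + bias).clamp_min(0)
    return y, y <= 0  # activation and the saved backward mask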
# kernel path: runs/run_shard_7/inductor_cache/e7/ce7ewq7bv76ie5hdmfxjj46viiuxlajdhtbost7f4gwclfa3hk4i.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_2 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
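# Taken together, the three kernels above plus the extern mm calls realize
# the actor forward in eager terms (illustrative sketch only):
#   x = relu(FC1(state)); x = relu(FC2(x)); x = tanh(FC3(x))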
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (512, 4), (4, 1))
assert_size_stride(primals_2, (512, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 512), (512, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 32768, grid=grid(32768), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 256), (1, 512), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 16384, grid=grid(16384), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((512, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def init_hidden(layer):
"""
    Compute symmetric uniform bounds for initializing a layer's weights.
"""
input_size = layer.weight.data.size()[0]
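    # Note (added for clarity): for nn.Linear, weight has shape
    # (out_features, in_features), so size()[0] is the output width; the
    # classic fan-in rule would use size()[1]. Kept as written to match
    # the compiled record.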
lim = 1.0 / np.sqrt(input_size)
return -lim, lim
class ActorNN(nn.Module):
"""
    Actor network: maps states to tanh-bounded actions.
"""
def __init__(self, state_size, action_size, hidden_size1=512,
hidden_size2=256):
"""
Initialize parameters
"""
super(ActorNN, self).__init__()
self.state_size = state_size
self.hidden_size1 = hidden_size1
self.hidden_size2 = hidden_size2
self.action_size = action_size
self.FC1 = nn.Linear(self.state_size, self.hidden_size1)
self.FC2 = nn.Linear(self.hidden_size1, self.hidden_size2)
self.FC3 = nn.Linear(self.hidden_size2, self.action_size)
self.reset_parameters()
def forward(self, state):
x = F.relu(self.FC1(state))
x = F.relu(self.FC2(x))
x = torch.tanh(self.FC3(x))
return x
def reset_parameters(self):
self.FC1.weight.data.uniform_(*init_hidden(self.FC1))
self.FC2.weight.data.uniform_(*init_hidden(self.FC2))
self.FC3.weight.data.uniform_(-0.003, 0.003)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (512, 4), (4, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 512), (512, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf1,
primals_2, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 512), (512, 1), 0),
reinterpret_tensor(primals_4, (512, 256), (1, 512), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16384)](buf3,
primals_5, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 512), (512, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), buf5, primals_6, buf6, primals_4, buf7
def init_hidden(layer):
"""
    Compute symmetric uniform bounds for initializing a layer's weights.
"""
input_size = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(input_size)
return -lim, lim
class ActorNNNew(nn.Module):
"""
    Actor network: maps states to tanh-bounded actions.
"""
def __init__(self, state_size, action_size, hidden_size1=512,
hidden_size2=256):
"""
Initialize parameters
"""
super(ActorNNNew, self).__init__()
self.state_size = state_size
self.hidden_size1 = hidden_size1
self.hidden_size2 = hidden_size2
self.action_size = action_size
self.FC1 = nn.Linear(self.state_size, self.hidden_size1)
self.FC2 = nn.Linear(self.hidden_size1, self.hidden_size2)
self.FC3 = nn.Linear(self.hidden_size2, self.action_size)
self.reset_parameters()
def reset_parameters(self):
self.FC1.weight.data.uniform_(*init_hidden(self.FC1))
self.FC2.weight.data.uniform_(*init_hidden(self.FC2))
self.FC3.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0):
primals_1 = self.FC1.weight
primals_2 = self.FC1.bias
primals_4 = self.FC2.weight
primals_5 = self.FC2.bias
primals_6 = self.FC3.weight
primals_7 = self.FC3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| kaustav1987/Tennis-Collaboration-and-Competition-Continuous-Control | ActorNN | false | 3,821 | [
"MIT"
] | 0 | d724e09d7a5948e2023fb86bf977455f3c507054 | https://github.com/kaustav1987/Tennis-Collaboration-and-Competition-Continuous-Control/tree/d724e09d7a5948e2023fb86bf977455f3c507054 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def init_hidden(layer):
"""
    Compute symmetric uniform bounds for initializing a layer's weights.
"""
input_size = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(input_size)
return -lim, lim
class Model(nn.Module):
"""
    Actor network: maps states to tanh-bounded actions.
"""
def __init__(self, state_size, action_size, hidden_size1=512,
hidden_size2=256):
"""
Initialize parameters
"""
super().__init__()
self.state_size = state_size
self.hidden_size1 = hidden_size1
self.hidden_size2 = hidden_size2
self.action_size = action_size
self.FC1 = nn.Linear(self.state_size, self.hidden_size1)
self.FC2 = nn.Linear(self.hidden_size1, self.hidden_size2)
self.FC3 = nn.Linear(self.hidden_size2, self.action_size)
self.reset_parameters()
def forward(self, state):
x = F.relu(self.FC1(state))
x = F.relu(self.FC2(x))
x = torch.tanh(self.FC3(x))
return x
def reset_parameters(self):
self.FC1.weight.data.uniform_(*init_hidden(self.FC1))
self.FC2.weight.data.uniform_(*init_hidden(self.FC2))
self.FC3.weight.data.uniform_(-0.003, 0.003)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
FeaturewiseAffine | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/vd/cvdopoziielrkt2t7tzyovmsaytzs4yqxbasy36krqzz5iwk2uvr.py
# Topologically Sorted Source Nodes: [mul, res], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# res => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %arg2_1), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
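# Eager equivalent of the fused kernel above (reference sketch only): all
# three operands share the (4, 4, 4, 4) shape, so the multiply-add is
# purely elementwise.
def _featurewise_affine_reference(x, scale, shift):
    return scale * x + shift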
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, res], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(arg0_1, arg1_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Union
import torch.nn as nn
class FeaturewiseAffine(nn.Module):
"""Feature-wise affine layer."""
def __init__(self):
super().__init__()
def forward(self, x, scale: 'Union[float, torch.Tensor]', shift:
'Union[float, torch.Tensor]'):
res = scale * x + shift
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](arg0_1, arg1_1, arg2_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class FeaturewiseAffineNew(nn.Module):
"""Feature-wise affine layer."""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| ketan0/ddim | FeaturewiseAffine | false | 3,822 | [
"MIT"
] | 0 | 26f2de1107885a3f332dd8435b73a1eaedbe10a8 | https://github.com/ketan0/ddim/tree/26f2de1107885a3f332dd8435b73a1eaedbe10a8 | import torch
from typing import Union
import torch.nn as nn
class Model(nn.Module):
"""Feature-wise affine layer."""
def __init__(self):
super().__init__()
def forward(self, x, scale: 'Union[float, torch.Tensor]', shift:
'Union[float, torch.Tensor]'):
res = scale * x + shift
return res
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
BiAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wm/cwm4p6i4onfgzirdknbkkkkalyi643y6ynzifzggmplzobgfpkd4.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# output => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
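# Note (added for clarity): this "clone" kernel materializes a broadcast.
# The index math (reads at x0 + 16 * x2, writes four times as many
# elements) suggests expand() produced a stride-0 view of a
# (4, 1, 4, 16)-like operand, and the copy makes it contiguous so the
# following bmm can consume it; the exact shapes are inferred, not taken
# from the graph. The next clone kernels follow the same pattern along
# different axes.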
# kernel path: runs/run_shard_7/inductor_cache/2g/c2gow746iojnl6yugujjn3non5klwrqsxgmhc4ib5irxlfwbv7ap.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# output => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gb/cgbvhbwg6zjbgpseylzouy4kgopkhyzopan7swcnpk23a2o45zr5.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# output_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/oz/cozxojbwhgofvogqiimshn6ijmzvg2zn4rlyuttbab5raacnhzht.py
# Topologically Sorted Source Nodes: [add, add_1, output_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# output_2 => add_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, %unsqueeze), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %unsqueeze_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_6), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x2 + (4*x1) + (16*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_5, buf3, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(primals_2, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6)
del buf4
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [add, add_1, output_2], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf7, buf0, buf1, primals_6, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_6
return (buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Optional
import torch.nn as nn
from torch.nn.parameter import Parameter
class BiAttention(nn.Module):
def __init__(self, input_size_encoder: 'int', input_size_decoder: 'int',
num_labels: 'int', biaffine: 'bool'=True, **kwargs) ->None:
super(BiAttention, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.num_labels = num_labels
self.biaffine = biaffine
self.W_e = Parameter(torch.Tensor(self.num_labels, self.
input_size_encoder))
self.W_d = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder))
self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder, self.input_size_encoder))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self) ->None:
nn.init.xavier_uniform_(self.W_e)
nn.init.xavier_uniform_(self.W_d)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_uniform_(self.U)
def forward(self, input_d: 'torch.Tensor', input_e: 'torch.Tensor',
mask_d: 'Optional[torch.Tensor]'=None, mask_e:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
assert input_d.size(0) == input_e.size(0)
_batch, _length_decoder, _ = input_d.size()
_, _length_encoder, _ = input_e.size()
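        # Scores combine a per-label bilinear term with separate linear terms:
        # output[b, l, i, j] = d_i^T U_l e_j + (W_d d_i)_l + (W_e e_j)_l + b_l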
out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
if self.biaffine:
output = torch.matmul(input_d.unsqueeze(1), self.U)
output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
output = output + out_d + out_e + self.b
else:
            output = out_d + out_e + self.b
if mask_d is not None:
output = output * mask_d.unsqueeze(1).unsqueeze(3
) * mask_e.unsqueeze(1).unsqueeze(2)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size_encoder': 4, 'input_size_decoder': 4,
'num_labels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
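# The clone kernels below materialize broadcast/transposed tensor views as
# contiguous buffers so they can feed the extern mm/bmm calls in call().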
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x2 + 4 * x1 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x4, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_5, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
buf5 = buf3
del buf3
triton_poi_fused_clone_2[grid(256)](primals_2, buf5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (16, 4, 4), (16,
4, 1), 0), out=buf6)
del buf4
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_add_3[grid(256)](buf7, buf0, buf1, primals_6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
del primals_6
return buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0)
class BiAttentionNew(nn.Module):
def __init__(self, input_size_encoder: 'int', input_size_decoder: 'int',
num_labels: 'int', biaffine: 'bool'=True, **kwargs) ->None:
super(BiAttentionNew, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.num_labels = num_labels
self.biaffine = biaffine
self.W_e = Parameter(torch.Tensor(self.num_labels, self.
input_size_encoder))
self.W_d = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder))
self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder, self.input_size_encoder))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self) ->None:
nn.init.xavier_uniform_(self.W_e)
nn.init.xavier_uniform_(self.W_d)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_uniform_(self.U)
def forward(self, input_0, input_1):
primals_3 = self.W_e
primals_4 = self.W_d
primals_6 = self.b
primals_1 = self.U
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| katie0809/KLUE-baseline | BiAttention | false | 3,823 | [
"Apache-2.0"
] | 0 | 144973359e9dc3bbbb3ce7a0cc765b0207f63775 | https://github.com/katie0809/KLUE-baseline/tree/144973359e9dc3bbbb3ce7a0cc765b0207f63775 | import torch
from typing import Optional
import torch.nn as nn
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, input_size_encoder: 'int', input_size_decoder: 'int',
num_labels: 'int', biaffine: 'bool'=True, **kwargs) ->None:
super().__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.num_labels = num_labels
self.biaffine = biaffine
self.W_e = Parameter(torch.Tensor(self.num_labels, self.
input_size_encoder))
self.W_d = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder))
self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.
input_size_decoder, self.input_size_encoder))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self) ->None:
nn.init.xavier_uniform_(self.W_e)
nn.init.xavier_uniform_(self.W_d)
nn.init.constant_(self.b, 0.0)
if self.biaffine:
nn.init.xavier_uniform_(self.U)
def forward(self, input_d: 'torch.Tensor', input_e: 'torch.Tensor',
mask_d: 'Optional[torch.Tensor]'=None, mask_e:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
assert input_d.size(0) == input_e.size(0)
_batch, _length_decoder, _ = input_d.size()
_, _length_encoder, _ = input_e.size()
out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
if self.biaffine:
output = torch.matmul(input_d.unsqueeze(1), self.U)
output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
output = output + out_d + out_e + self.b
else:
            output = out_d + out_e + self.b
if mask_d is not None:
output = output * mask_d.unsqueeze(1).unsqueeze(3
) * mask_e.unsqueeze(1).unsqueeze(2)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size_encoder': 4, 'input_size_decoder': 4,
'num_labels': 4}]
|
Mish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nf/cnfomfgkrddid5xfz2abcevz3avk7wtz7mughpoinpbdg3kjz27l.py
# Topologically Sorted Source Nodes: [softplus, tanh, mul_], Original ATen: [aten.softplus, aten.tanh, aten.mul]
# Source node to ATen node mapping:
# mul_ => mul
# softplus => exp, gt, log1p, where
# tanh => tanh
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %tanh), kwargs = {})
# %copy_ : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %mul), kwargs = {})
triton_poi_fused_mul_softplus_tanh_0 = async_compile.triton('triton_poi_fused_mul_softplus_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_softplus_tanh_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tmp0 * tmp6
tl.store(out_ptr1 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [softplus, tanh, mul_], Original ATen: [aten.softplus, aten.tanh, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_softplus_tanh_0.run(arg0_1, arg0_1, 256, grid=grid(256), stream=stream0)
return (arg0_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class Mish(nn.Module):
def forward(self, x):
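        # Mish activation: x * tanh(softplus(x)); mul_ writes the result in place.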
return x.mul_(F.softplus(x).tanh())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
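# Fused elementwise kernel: softplus (with the usual x > 20 overflow guard),
# tanh, and multiply; the result is stored back over the input buffer (in-place mul_).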
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tmp0 * tmp6
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return arg0_1,
class MishNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| khayliang/single_person_tracking | Mish | false | 3,824 | [
"MIT"
] | 0 | d93aae3742ba3c77f00b3917b182784f03b5d597 | https://github.com/khayliang/single_person_tracking/tree/d93aae3742ba3c77f00b3917b182784f03b5d597 | import torch
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
def forward(self, x):
return x.mul_(F.softplus(x).tanh())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TripletLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/hf/chfkflh64ls6zzbpznzkz4j3nrkuncb5q5sdz3uisbtruo4dxnxl.py
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
# Source node to ATen node mapping:
# add => add
# distance_negative => sum_2
# distance_positive => sum_1
# losses => relu
# mean => mean
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg2_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 1.0), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu,), kwargs = {})
triton_per_fused_add_mean_pow_relu_sub_sum_0 = async_compile.triton('triton_per_fused_add_mean_pow_relu_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_relu_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp19 = tl.load(in_ptr2 + (r0 + (64*r1)), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 1.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
class TripletLoss(nn.Module):
def __init__(self, margin=1.0):
super(TripletLoss, self).__init__()
self.margin = margin
def calc_euclidean(self, x1, x2):
return (x1 - x2).pow(2).sum(1)
def forward(self, anchor: 'torch.Tensor', positive: 'torch.Tensor',
negative: 'torch.Tensor') ->torch.Tensor:
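        # Triplet margin loss: mean(max(0, d(anchor, positive) - d(anchor, negative) + margin))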
distance_positive = self.calc_euclidean(anchor, positive)
distance_negative = self.calc_euclidean(anchor, negative)
losses = torch.relu(distance_positive - distance_negative + self.margin
)
return losses.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
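# Single persistent-reduction kernel: both squared distances (summed over dim 1),
# the margin hinge, and the final mean are fused into one pass over the 64 outputs.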
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 1.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class TripletLossNew(nn.Module):
def __init__(self, margin=1.0):
super(TripletLossNew, self).__init__()
self.margin = margin
def calc_euclidean(self, x1, x2):
return (x1 - x2).pow(2).sum(1)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| ketan-lambat/contrastive-unpaired-translation | TripletLoss | false | 3,825 | [
"BSD-3-Clause"
] | 0 | ea71b3a9603a51b97f1fa8426d5a1beae9260a0d | https://github.com/ketan-lambat/contrastive-unpaired-translation/tree/ea71b3a9603a51b97f1fa8426d5a1beae9260a0d | import torch
import torch.utils.data
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, margin=1.0):
super().__init__()
self.margin = margin
def calc_euclidean(self, x1, x2):
return (x1 - x2).pow(2).sum(1)
def forward(self, anchor: 'torch.Tensor', positive: 'torch.Tensor',
negative: 'torch.Tensor') ->torch.Tensor:
distance_positive = self.calc_euclidean(anchor, positive)
distance_negative = self.calc_euclidean(anchor, negative)
losses = torch.relu(distance_positive - distance_negative + self.margin
)
return losses.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
CriticNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/a3/ca3z5uhmtspcn6kafgixsymyw4sumsxr5y5dc3a2bqoje3glppxc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2064
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 516
x1 = (xindex // 516)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((512*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 516, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-512) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/y2/cy2lwgz7dq2q2z4ifepdde4l7vyyvrwcx4zjn2ezmtzcanvhv374.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uf/cufglc4o74j5advtp5cf2twmg7ejdnnhpx7qsejsxqvd4icuyfqh.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (512, 4), (4, 1))
assert_size_stride(primals_2, (512, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (256, 516), (516, 1))
assert_size_stride(primals_6, (256, ), (1, ))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 516), (516, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 2064, grid=grid(2064), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (516, 256), (1, 516), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_6, 1024, grid=grid(1024), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf0, primals_2, buf6, 2048, grid=grid(2048), stream=stream0)
del buf0
del primals_2
return (buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((512, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, 516), (516, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def init_hidden(layer):
"""
    Compute uniform init bounds (-lim, lim) with lim = 1/sqrt(layer.weight.size(0)).
"""
input_size = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(input_size)
return -lim, lim
class CriticNN(nn.Module):
"""
    Critic network: maps a (state, action) pair to a scalar Q-value estimate.
"""
def __init__(self, state_size, action_size, hidden_size1=512,
hidden_size2=256):
"""
        Build the network layers and initialize their weights.
"""
super(CriticNN, self).__init__()
self.state_size = state_size
self.hidden_size1 = hidden_size1
self.hidden_size2 = hidden_size2
self.action_size = action_size
self.FC1 = nn.Linear(self.state_size, self.hidden_size1)
self.FC2 = nn.Linear(self.hidden_size1 + self.action_size, self.
hidden_size2)
self.FC3 = nn.Linear(self.hidden_size2, 1)
self.reset_parameters()
def forward(self, state, action):
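        # DDPG-style critic: embed the state first, then concatenate the action before FC2.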
x = F.relu(self.FC1(state))
x = torch.cat((x, action), dim=1)
x = F.relu(self.FC2(x))
x = self.FC3(x)
return x
def reset_parameters(self):
self.FC1.weight.data.uniform_(*init_hidden(self.FC1))
self.FC2.weight.data.uniform_(*init_hidden(self.FC2))
self.FC3.weight.data.uniform_(-0.003, 0.003)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
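# Kernel 0 fuses the first Linear's bias add + ReLU with the concatenation of the
# action features; kernel 2 recomputes the ReLU mask that is saved for backward.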
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 2064
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 516
x1 = xindex // 516
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (512 * x1 + x0), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 516, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-512 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (512, 4), (4, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (256, 516), (516, 1))
assert_size_stride(primals_6, (256,), (1,))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 512),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 516), (516, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(2064)](buf0, primals_2, primals_4, buf1,
2064, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (516, 256), (
1, 516), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(1024)](buf3, primals_6, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7,
(256, 1), (1, 256), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(2048)](buf0,
primals_2, buf6, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6
def init_hidden(layer):
"""
    Compute symmetric uniform-init bounds for a layer's weights
"""
input_size = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(input_size)
return -lim, lim
class CriticNNNew(nn.Module):
"""
Critic class
"""
def __init__(self, state_size, action_size, hidden_size1=512,
hidden_size2=256):
"""
Initialize parameters
"""
super(CriticNNNew, self).__init__()
self.state_size = state_size
self.hidden_size1 = hidden_size1
self.hidden_size2 = hidden_size2
self.action_size = action_size
self.FC1 = nn.Linear(self.state_size, self.hidden_size1)
        self.FC2 = nn.Linear(self.hidden_size1 + self.action_size,
            self.hidden_size2)
self.FC3 = nn.Linear(self.hidden_size2, 1)
self.reset_parameters()
def reset_parameters(self):
self.FC1.weight.data.uniform_(*init_hidden(self.FC1))
self.FC2.weight.data.uniform_(*init_hidden(self.FC2))
self.FC3.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0, input_1):
primals_1 = self.FC1.weight
primals_2 = self.FC1.bias
primals_5 = self.FC2.weight
primals_6 = self.FC2.bias
primals_7 = self.FC3.weight
primals_8 = self.FC3.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| kaustav1987/Tennis-Collaboration-and-Competition-Continuous-Control | CriticNN | false | 3,826 | [
"MIT"
] | 0 | d724e09d7a5948e2023fb86bf977455f3c507054 | https://github.com/kaustav1987/Tennis-Collaboration-and-Competition-Continuous-Control/tree/d724e09d7a5948e2023fb86bf977455f3c507054 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def init_hidden(layer):
"""
    Compute symmetric uniform-init bounds for a layer's weights
"""
input_size = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(input_size)
return -lim, lim
class Model(nn.Module):
"""
Critic class
"""
def __init__(self, state_size, action_size, hidden_size1=512,
hidden_size2=256):
"""
Initialize parameters
"""
super().__init__()
self.state_size = state_size
self.hidden_size1 = hidden_size1
self.hidden_size2 = hidden_size2
self.action_size = action_size
self.FC1 = nn.Linear(self.state_size, self.hidden_size1)
        self.FC2 = nn.Linear(self.hidden_size1 + self.action_size,
            self.hidden_size2)
self.FC3 = nn.Linear(self.hidden_size2, 1)
self.reset_parameters()
def forward(self, state, action):
x = F.relu(self.FC1(state))
x = torch.cat((x, action), dim=1)
x = F.relu(self.FC2(x))
x = self.FC3(x)
return x
def reset_parameters(self):
self.FC1.weight.data.uniform_(*init_hidden(self.FC1))
self.FC2.weight.data.uniform_(*init_hidden(self.FC2))
self.FC3.weight.data.uniform_(-0.003, 0.003)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
AmdimNCELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lo/clogbilbksb5pe5z7x7zzgxgebeyhflxrjymvbwjeoos6el7ofav.py
# Topologically Sorted Source Nodes: [raw_scores_2, mul_1, tanh, x_clip, max_1, mul_3, pos_scores, pos_shiftexp, sub_3, exp_1, sub_2, exp, mul_6, neg_sumexp, add, all_logsumexp, nce_scores, mean_1, nce_scores_1], Original ATen: [aten.div, aten.mul, aten.tanh, aten.max, aten.sum, aten.sub, aten.exp, aten.add, aten.log, aten.mean, aten.neg]
# Source node to ATen node mapping:
# add => add
# all_logsumexp => log
# exp => exp
# exp_1 => exp_1
# max_1 => max_1
# mean_1 => mean_1
# mul_1 => mul_1
# mul_3 => mul_3
# mul_6 => mul_6
# nce_scores => sub_5
# nce_scores_1 => neg
# neg_sumexp => sum_2
# pos_scores => sum_1
# pos_shiftexp => sub_4
# raw_scores_2 => div
# sub_2 => sub_2
# sub_3 => sub_3
# tanh => tanh
# x_clip => mul_2
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, 2.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 0.25), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 4), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_1, 1, True), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %mul_2), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %getitem), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %getitem), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %exp), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, %log), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_5,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {})
triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0 = async_compile.triton('triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp6 = 0.25
tmp7 = tmp5 * tmp6
tmp8 = libdevice.tanh(tmp7)
tmp9 = 4.0
tmp10 = tmp8 * tmp9
tmp11 = tmp2 * tmp10
tmp12 = tmp0 * tmp9
tmp13 = tmp11 - tmp12
tmp15 = tmp1 - tmp14
tmp17 = tmp16 * tmp4
tmp18 = tmp17 * tmp6
tmp19 = libdevice.tanh(tmp18)
tmp20 = tmp19 * tmp9
tmp21 = tmp15 * tmp20
tmp22 = tmp14 * tmp9
tmp23 = tmp21 - tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp26 = tmp1 - tmp25
tmp28 = tmp27 * tmp4
tmp29 = tmp28 * tmp6
tmp30 = libdevice.tanh(tmp29)
tmp31 = tmp30 * tmp9
tmp32 = tmp26 * tmp31
tmp33 = tmp25 * tmp9
tmp34 = tmp32 - tmp33
tmp35 = triton_helpers.maximum(tmp24, tmp34)
tmp37 = tmp1 - tmp36
tmp39 = tmp38 * tmp4
tmp40 = tmp39 * tmp6
tmp41 = libdevice.tanh(tmp40)
tmp42 = tmp41 * tmp9
tmp43 = tmp37 * tmp42
tmp44 = tmp36 * tmp9
tmp45 = tmp43 - tmp44
tmp46 = triton_helpers.maximum(tmp35, tmp45)
tmp47 = tmp13 - tmp46
tmp48 = tl_math.exp(tmp47)
tmp49 = tmp2 * tmp48
tmp50 = tmp23 - tmp46
tmp51 = tl_math.exp(tmp50)
tmp52 = tmp15 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tmp34 - tmp46
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp26 * tmp55
tmp57 = tmp53 + tmp56
tmp58 = tmp45 - tmp46
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp37 * tmp59
tmp61 = tmp57 + tmp60
tmp62 = tmp0 * tmp10
tmp63 = tmp14 * tmp20
tmp64 = tmp62 + tmp63
tmp65 = tmp25 * tmp31
tmp66 = tmp64 + tmp65
tmp67 = tmp36 * tmp42
tmp68 = tmp66 + tmp67
tmp69 = tmp68 - tmp46
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp70 + tmp61
tmp72 = tl_math.log(tmp71)
tmp73 = tmp69 - tmp72
tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK])
tmp76 = tl.sum(tmp74, 1)[:, None]
tmp77 = tmp76 / tmp9
tmp78 = -tmp77
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp78, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/i7/ci7wfxaextt4cz2wvuqp24v6te3ikcbwiocamjs25ysoohh6nywz.py
# Topologically Sorted Source Nodes: [raw_scores_2, pow_1, mean, lgt_reg], Original ATen: [aten.div, aten.pow, aten.mean, aten.mul]
# Source node to ATen node mapping:
# lgt_reg => mul
# mean => mean
# pow_1 => pow_1
# raw_scores_2 => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, 2.0), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div, 2.0), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.05), kwargs = {})
triton_per_fused_div_mean_mul_pow_1 = async_compile.triton('triton_per_fused_div_mean_mul_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_mul_pow_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mean_mul_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 16.0
tmp8 = tmp6 / tmp7
tmp9 = 0.05
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(arg0_1, arg1_1, out=buf0)
del arg0_1
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [raw_scores_2, mul_1, tanh, x_clip, max_1, mul_3, pos_scores, pos_shiftexp, sub_3, exp_1, sub_2, exp, mul_6, neg_sumexp, add, all_logsumexp, nce_scores, mean_1, nce_scores_1], Original ATen: [aten.div, aten.mul, aten.tanh, aten.max, aten.sum, aten.sub, aten.exp, aten.add, aten.log, aten.mean, aten.neg]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0.run(buf6, arg2_1, buf0, 1, 4, grid=grid(1), stream=stream0)
del arg2_1
buf5 = empty_strided_cuda((), (), torch.float32)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [raw_scores_2, pow_1, mean, lgt_reg], Original ATen: [aten.div, aten.pow, aten.mean, aten.mul]
triton_per_fused_div_mean_mul_pow_1.run(buf7, buf0, 1, 16, grid=grid(1), stream=stream0)
del buf0
return (buf6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def tanh_clip(x, clip_val=10.0):
"""
    Soft-clip values to the range [-clip_val, +clip_val] via a scaled tanh
"""
if clip_val is not None:
x_clip = clip_val * torch.tanh(1.0 / clip_val * x)
else:
x_clip = x
return x_clip
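# Example (illustrative): unlike a hard clamp, the clip saturates smoothly;
# tanh_clip(torch.tensor([0.1, 100.0]), clip_val=4.0) -> approx. [0.1000, 4.0000].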
class AmdimNCELoss(nn.Module):
"""
Compute the NCE scores for predicting r_src->r_trg.
"""
def __init__(self, tclip):
super().__init__()
self.tclip = tclip
def forward(self, anchor_representations, positive_representations,
mask_mat):
"""
Args:
anchor_representations: (batch_size, emb_dim)
            positive_representations: (emb_dim, n_batch * w * h) (i.e. nb_feat_vectors x embedding_dim)
mask_mat: (n_batch_gpu, n_batch)
Output:
raw_scores: (n_batch_gpu, n_locs)
nce_scores: (n_batch_gpu, n_locs)
lgt_reg : scalar
"""
r_src = anchor_representations
r_trg = positive_representations
batch_size, emb_dim = r_src.size()
nb_feat_vectors = r_trg.size(1) // batch_size
mask_pos = mask_mat.unsqueeze(dim=2).expand(-1, -1, nb_feat_vectors
).float()
mask_neg = 1.0 - mask_pos
raw_scores = torch.mm(r_src, r_trg).float()
raw_scores = raw_scores.reshape(batch_size, batch_size, nb_feat_vectors
)
raw_scores = raw_scores / emb_dim ** 0.5
lgt_reg = 0.05 * (raw_scores ** 2.0).mean()
raw_scores = tanh_clip(raw_scores, clip_val=self.tclip)
"""
pos_scores includes scores for all the positive samples
neg_scores includes scores for all the negative samples, with
scores for positive samples set to the min score (-self.tclip here)
"""
pos_scores = (mask_pos * raw_scores).sum(dim=1)
neg_scores = mask_neg * raw_scores - self.tclip * mask_pos
neg_scores = neg_scores.reshape(batch_size, -1)
mask_neg = mask_neg.reshape(batch_size, -1)
neg_maxes = torch.max(neg_scores, dim=1, keepdim=True)[0]
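        # log-sum-exp shift: subtracting the per-row max before exp() avoids
        # overflow; the shift cancels in pos_shiftexp - all_logsumexp below.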
        neg_sumexp = (mask_neg * torch.exp(neg_scores - neg_maxes)).sum(
            dim=1, keepdim=True)
all_logsumexp = torch.log(torch.exp(pos_scores - neg_maxes) +
neg_sumexp)
pos_shiftexp = pos_scores - neg_maxes
nce_scores = pos_shiftexp - all_logsumexp
nce_scores = -nce_scores.mean()
return nce_scores, lgt_reg
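# Minimal usage sketch (illustrative; with these square inputs nb_feat_vectors
# == 1 and an identity mask marks the positives):
# loss_fn = AmdimNCELoss(tclip=4.0)
# nce, reg = loss_fn(torch.rand(4, 4), torch.rand(4, 4), torch.eye(4))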
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'tclip': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp6 = 0.25
tmp7 = tmp5 * tmp6
tmp8 = libdevice.tanh(tmp7)
tmp9 = 4.0
tmp10 = tmp8 * tmp9
tmp11 = tmp2 * tmp10
tmp12 = tmp0 * tmp9
tmp13 = tmp11 - tmp12
tmp15 = tmp1 - tmp14
tmp17 = tmp16 * tmp4
tmp18 = tmp17 * tmp6
tmp19 = libdevice.tanh(tmp18)
tmp20 = tmp19 * tmp9
tmp21 = tmp15 * tmp20
tmp22 = tmp14 * tmp9
tmp23 = tmp21 - tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp26 = tmp1 - tmp25
tmp28 = tmp27 * tmp4
tmp29 = tmp28 * tmp6
tmp30 = libdevice.tanh(tmp29)
tmp31 = tmp30 * tmp9
tmp32 = tmp26 * tmp31
tmp33 = tmp25 * tmp9
tmp34 = tmp32 - tmp33
tmp35 = triton_helpers.maximum(tmp24, tmp34)
tmp37 = tmp1 - tmp36
tmp39 = tmp38 * tmp4
tmp40 = tmp39 * tmp6
tmp41 = libdevice.tanh(tmp40)
tmp42 = tmp41 * tmp9
tmp43 = tmp37 * tmp42
tmp44 = tmp36 * tmp9
tmp45 = tmp43 - tmp44
tmp46 = triton_helpers.maximum(tmp35, tmp45)
tmp47 = tmp13 - tmp46
tmp48 = tl_math.exp(tmp47)
tmp49 = tmp2 * tmp48
tmp50 = tmp23 - tmp46
tmp51 = tl_math.exp(tmp50)
tmp52 = tmp15 * tmp51
tmp53 = tmp49 + tmp52
tmp54 = tmp34 - tmp46
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp26 * tmp55
tmp57 = tmp53 + tmp56
tmp58 = tmp45 - tmp46
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp37 * tmp59
tmp61 = tmp57 + tmp60
tmp62 = tmp0 * tmp10
tmp63 = tmp14 * tmp20
tmp64 = tmp62 + tmp63
tmp65 = tmp25 * tmp31
tmp66 = tmp64 + tmp65
tmp67 = tmp36 * tmp42
tmp68 = tmp66 + tmp67
tmp69 = tmp68 - tmp46
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp70 + tmp61
tmp72 = tl_math.log(tmp71)
tmp73 = tmp69 - tmp72
tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK])
tmp76 = tl.sum(tmp74, 1)[:, None]
tmp77 = tmp76 / tmp9
tmp78 = -tmp77
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp78, None)
@triton.jit
def triton_per_fused_div_mean_mul_pow_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tmp7 = 16.0
tmp8 = tmp6 / tmp7
tmp9 = 0.05
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, arg1_1, out=buf0)
del arg0_1
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf6 = buf4
del buf4
get_raw_stream(0)
triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0[grid
(1)](buf6, arg2_1, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg2_1
buf5 = empty_strided_cuda((), (), torch.float32)
buf7 = buf5
del buf5
triton_per_fused_div_mean_mul_pow_1[grid(1)](buf7, buf0, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del buf0
return buf6, buf7
def tanh_clip(x, clip_val=10.0):
"""
    Soft-clip values to the range [-clip_val, +clip_val] via a scaled tanh
"""
if clip_val is not None:
x_clip = clip_val * torch.tanh(1.0 / clip_val * x)
else:
x_clip = x
return x_clip
class AmdimNCELossNew(nn.Module):
"""
Compute the NCE scores for predicting r_src->r_trg.
"""
def __init__(self, tclip):
super().__init__()
self.tclip = tclip
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
| jfrancis71/pytorch-lightning-bolts | AmdimNCELoss | false | 3,827 | [
"Apache-2.0"
] | 0 | 8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6 | https://github.com/jfrancis71/pytorch-lightning-bolts/tree/8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6 | import torch
import torch.nn as nn
def tanh_clip(x, clip_val=10.0):
"""
    Soft-clip values to the range [-clip_val, +clip_val] via a scaled tanh
"""
if clip_val is not None:
x_clip = clip_val * torch.tanh(1.0 / clip_val * x)
else:
x_clip = x
return x_clip
class Model(nn.Module):
"""
Compute the NCE scores for predicting r_src->r_trg.
"""
def __init__(self, tclip):
super().__init__()
self.tclip = tclip
def forward(self, anchor_representations, positive_representations,
mask_mat):
"""
Args:
anchor_representations: (batch_size, emb_dim)
            positive_representations: (emb_dim, n_batch * w * h) (i.e. nb_feat_vectors x embedding_dim)
mask_mat: (n_batch_gpu, n_batch)
Output:
raw_scores: (n_batch_gpu, n_locs)
nce_scores: (n_batch_gpu, n_locs)
lgt_reg : scalar
"""
r_src = anchor_representations
r_trg = positive_representations
batch_size, emb_dim = r_src.size()
nb_feat_vectors = r_trg.size(1) // batch_size
mask_pos = mask_mat.unsqueeze(dim=2).expand(-1, -1, nb_feat_vectors
).float()
mask_neg = 1.0 - mask_pos
raw_scores = torch.mm(r_src, r_trg).float()
raw_scores = raw_scores.reshape(batch_size, batch_size, nb_feat_vectors
)
raw_scores = raw_scores / emb_dim ** 0.5
lgt_reg = 0.05 * (raw_scores ** 2.0).mean()
raw_scores = tanh_clip(raw_scores, clip_val=self.tclip)
"""
pos_scores includes scores for all the positive samples
neg_scores includes scores for all the negative samples, with
scores for positive samples set to the min score (-self.tclip here)
"""
pos_scores = (mask_pos * raw_scores).sum(dim=1)
neg_scores = mask_neg * raw_scores - self.tclip * mask_pos
neg_scores = neg_scores.reshape(batch_size, -1)
mask_neg = mask_neg.reshape(batch_size, -1)
neg_maxes = torch.max(neg_scores, dim=1, keepdim=True)[0]
        neg_sumexp = (mask_neg * torch.exp(neg_scores - neg_maxes)).sum(
            dim=1, keepdim=True)
all_logsumexp = torch.log(torch.exp(pos_scores - neg_maxes) +
neg_sumexp)
pos_shiftexp = pos_scores - neg_maxes
nce_scores = pos_shiftexp - all_logsumexp
nce_scores = -nce_scores.mean()
return nce_scores, lgt_reg
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
Swish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lc/clcsmfjtwsnzu2llmpsmvwrt5ojf76ozdv5ttluhi2gtpojjo6lv.py
# Topologically Sorted Source Nodes: [sigmoid, mul_], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul_ => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {})
# %copy_ : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %mul), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr1 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [sigmoid, mul_], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(arg0_1, arg0_1, 256, grid=grid(256), stream=stream0)
return (arg0_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Swish(nn.Module):
def forward(self, x):
return x.mul_(torch.sigmoid(x))
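# Note: this is the in-place form of SiLU/Swish, x * sigmoid(x). A quick sketch
# of the equivalence (torch.nn.functional.silu exists in PyTorch >= 1.7):
# x = torch.randn(8)
# assert torch.allclose(x.clone().mul_(torch.sigmoid(x)), torch.nn.functional.silu(x))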
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr1 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return arg0_1,
class SwishNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| khayliang/single_person_tracking | Swish | false | 3,828 | [
"MIT"
] | 0 | d93aae3742ba3c77f00b3917b182784f03b5d597 | https://github.com/khayliang/single_person_tracking/tree/d93aae3742ba3c77f00b3917b182784f03b5d597 | import torch
from torch import nn
class Model(nn.Module):
def forward(self, x):
return x.mul_(torch.sigmoid(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FakeRKHSConvNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sb/csbfa4itjbflkqsfneo2c6mi3vnrum7dc2e7l2pstoa2kdz6gtd3.py
# Topologically Sorted Source Nodes: [conv2d_2, add, x], Original ATen: [aten.convolution, aten.add, aten._native_batch_norm_legit_no_training, aten.native_batch_norm_backward]
# Source node to ATen node mapping:
# add => add
# conv2d_2 => convolution_2
# x => add_2, mul_1, mul_2, sub
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_2, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %convolution_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %unsqueeze_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %unsqueeze_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %unsqueeze_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %unsqueeze_7), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %unsqueeze_10), kwargs = {})
triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full([1], 1, tl.int32)
tmp12 = tmp11 / tmp10
tmp13 = 1.0
tmp14 = tmp12 * tmp13
tmp15 = tmp6 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + (x3), tmp19, xmask)
tl.store(out_ptr1 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [h_res], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, add, x], Original ATen: [aten.convolution, aten.add, aten._native_batch_norm_legit_no_training, aten.native_batch_norm_backward]
triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1.run(buf2, buf3, primals_5, primals_6, primals_7, primals_8, primals_9, buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf2
del buf3
del primals_5
del primals_6
del primals_9
return (buf4, primals_1, primals_2, primals_3, primals_4, primals_7, primals_8, buf1, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import numpy as np
import torch.nn as nn
class MaybeBatchNorm2d(nn.Module):
def __init__(self, n_ftr, affine, use_bn):
super(MaybeBatchNorm2d, self).__init__()
self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
self.use_bn = use_bn
def forward(self, x):
if self.use_bn:
x = self.bn(x)
return x
class FakeRKHSConvNet(nn.Module):
def __init__(self, n_input, n_output, use_bn=False):
super(FakeRKHSConvNet, self).__init__()
self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn_out = MaybeBatchNorm2d(n_output, True, True)
        self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1,
            stride=1, padding=0, bias=True)
if n_output >= n_input:
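            # Identity-style init: the mask below marks the (i, i) channel
            # pairs so the shortcut starts out as a channel copy of the input.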
            eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
for i in range(n_input):
eye_mask[i, i, 0, 0] = 1
self.shortcut.weight.data.uniform_(-0.01, 0.01)
self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0)
def init_weights(self, init_scale=1.0):
nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
self.conv1.weight.data.mul_(init_scale)
nn.init.constant_(self.conv2.weight, 0.0)
def forward(self, x):
h_res = self.conv2(self.relu1(self.bn1(self.conv1(x))))
h = self.bn_out(h_res + self.shortcut(x))
return h
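# Minimal usage sketch (illustrative shapes; the 1x1 convs keep spatial size):
# net = FakeRKHSConvNet(n_input=4, n_output=4)
# out = net(torch.rand(4, 4, 4, 4))  # -> (4, 4, 4, 4)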
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_input': 4, 'n_output': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1(
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full([1], 1, tl.int32)
tmp12 = tmp11 / tmp10
tmp13 = 1.0
tmp14 = tmp12 * tmp13
tmp15 = tmp6 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + x3, tmp19, xmask)
tl.store(out_ptr1 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = extern_kernels.convolution(primals_2, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1[
grid(256)](buf2, buf3, primals_5, primals_6, primals_7,
primals_8, primals_9, buf4, buf5, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
del buf3
del primals_5
del primals_6
del primals_9
return (buf4, primals_1, primals_2, primals_3, primals_4, primals_7,
primals_8, buf1, buf5)
class MaybeBatchNorm2d(nn.Module):
def __init__(self, n_ftr, affine, use_bn):
super(MaybeBatchNorm2d, self).__init__()
self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
self.use_bn = use_bn
def forward(self, x):
if self.use_bn:
x = self.bn(x)
return x
class FakeRKHSConvNetNew(nn.Module):
def __init__(self, n_input, n_output, use_bn=False):
super(FakeRKHSConvNetNew, self).__init__()
self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn_out = MaybeBatchNorm2d(n_output, True, True)
        self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1,
            stride=1, padding=0, bias=True)
if n_output >= n_input:
            eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
for i in range(n_input):
eye_mask[i, i, 0, 0] = 1
self.shortcut.weight.data.uniform_(-0.01, 0.01)
self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0)
def init_weights(self, init_scale=1.0):
nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
self.conv1.weight.data.mul_(init_scale)
nn.init.constant_(self.conv2.weight, 0.0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_5 = self.bn1.bn.weight
primals_6 = self.bn1.bn.bias
primals_3 = self.conv2.weight
primals_7 = self.bn_out.bn.weight
primals_8 = self.bn_out.bn.bias
primals_4 = self.shortcut.weight
primals_9 = self.shortcut.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| jfrancis71/pytorch-lightning-bolts | FakeRKHSConvNet | false | 3,829 | [
"Apache-2.0"
] | 0 | 8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6 | https://github.com/jfrancis71/pytorch-lightning-bolts/tree/8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6 | import math
import torch
import numpy as np
import torch.nn as nn
class MaybeBatchNorm2d(nn.Module):
def __init__(self, n_ftr, affine, use_bn):
super().__init__()
self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
self.use_bn = use_bn
def forward(self, x):
if self.use_bn:
x = self.bn(x)
return x
class Model(nn.Module):
def __init__(self, n_input, n_output, use_bn=False):
super().__init__()
self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn_out = MaybeBatchNorm2d(n_output, True, True)
        self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1,
            stride=1, padding=0, bias=True)
if n_output >= n_input:
            eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
for i in range(n_input):
eye_mask[i, i, 0, 0] = 1
self.shortcut.weight.data.uniform_(-0.01, 0.01)
self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0)
def init_weights(self, init_scale=1.0):
nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
self.conv1.weight.data.mul_(init_scale)
nn.init.constant_(self.conv2.weight, 0.0)
def forward(self, x):
h_res = self.conv2(self.relu1(self.bn1(self.conv1(x))))
h = self.bn_out(h_res + self.shortcut(x))
return h
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SchedulerTestNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ej/cejq4hvgtngsrt5ywcfdheadnaab2ydhtz352v7vusjumjik42f2.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + (x0), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4b/c4bm44vlkroihtm4ebt4iyykoeyhr2cwl6horqusip6sdixccmyf.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 16384, grid=grid(16384), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 16384, grid=grid(16384), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
class SchedulerTestNet(torch.nn.Module):
"""
adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py
"""
def __init__(self):
super(SchedulerTestNet, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, None)
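# (Added note) The kernel above fuses only the epilogue of conv1: the 1x1
# convolution itself runs through extern_kernels.convolution in call(), after
# which this kernel adds the scalar bias (broadcast from in_ptr0[0]) and
# applies ReLU in place over the 4*1*64*64 = 16384-element output buffer.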
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(16384)](buf3, primals_5, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1
class SchedulerTestNetNew(torch.nn.Module):
"""
adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py
"""
def __init__(self):
super(SchedulerTestNetNew, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| jfrancis71/pytorch-lightning-bolts | SchedulerTestNet | false | 3,830 | [
"Apache-2.0"
] | 0 | 8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6 | https://github.com/jfrancis71/pytorch-lightning-bolts/tree/8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6 | import torch
from torch.nn import functional as F
class Model(torch.nn.Module):
"""
adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py
"""
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
InverseSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7f/c7fmblnwrd5xkevggxa3ftrkz2zx6aw6igd7dqct57u7fjjrv7an.py
# Topologically Sorted Source Nodes: [neg, sub, truediv, log], Original ATen: [aten.neg, aten.sub, aten.div, aten.log]
# Source node to ATen node mapping:
# log => log
# neg => neg
# sub => sub
# truediv => div
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %sub), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {})
triton_poi_fused_div_log_neg_sub_0 = async_compile.triton('triton_poi_fused_div_log_neg_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_log_neg_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_log_neg_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -tmp0
tmp2 = 1.0
tmp3 = tmp0 - tmp2
tmp4 = tmp1 / tmp3
tmp5 = tl_math.log(tmp4)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg, sub, truediv, log], Original ATen: [aten.neg, aten.sub, aten.div, aten.log]
stream0 = get_raw_stream(0)
triton_poi_fused_div_log_neg_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
def inverseSigmoid(y):
"""
    inverse of y=torch.sigmoid(x)
    :param y: the output of the sigmoid
    :return: x, the corresponding input
"""
return torch.log(-y / (y - 1))
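# Sanity-check sketch (an illustrative addition, not part of the original
# module): y = sigmoid(x) = 1 / (1 + exp(-x)) solves to
# x = log(y / (1 - y)) = log(-y / (y - 1)), the form used above, so applying
# inverseSigmoid to a sigmoid output should recover the input.
def _inverse_sigmoid_roundtrip_check():
    x = torch.linspace(-3.0, 3.0, steps=7)
    y = torch.sigmoid(x)
    assert torch.allclose(inverseSigmoid(y), x, atol=1e-5)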
class InverseSigmoid(torch.nn.Module):
def forward(self, y):
return inverseSigmoid(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_log_neg_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tmp2 = 1.0
tmp3 = tmp0 - tmp2
tmp4 = tmp1 / tmp3
tmp5 = tl_math.log(tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
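# (Added note) The kernel above is the elementwise chain for log(-y / (y - 1)):
# tmp1 = -y, tmp3 = y - 1.0, tmp4 = -y / (y - 1), tmp5 = log(tmp4).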
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_log_neg_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def inverseSigmoid(y):
"""
    inverse of y=torch.sigmoid(x)
    :param y: the output of the sigmoid
    :return: x, the corresponding input
"""
return torch.log(-y / (y - 1))
class InverseSigmoidNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| khoehlein/fV-SRN | InverseSigmoid | false | 3,831 | [
"MIT"
] | 0 | 601f3e952b090df92e875c233c2c9ca646523948 | https://github.com/khoehlein/fV-SRN/tree/601f3e952b090df92e875c233c2c9ca646523948 | import torch
import torch.utils.data
def inverseSigmoid(y):
"""
    inverse of y=torch.sigmoid(x)
    :param y: the output of the sigmoid
    :return: x, the corresponding input
"""
return torch.log(-y / (y - 1))
class Model(torch.nn.Module):
def forward(self, y):
return inverseSigmoid(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
InverseSoftplus | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/rb/crbawmnl4bjgqmcf7mval37vmn4rkpg4yjtuxascde4vrvequody.py
# Topologically Sorted Source Nodes: [mul, gt, mul_1, exp, sub, log, truediv, where], Original ATen: [aten.mul, aten.gt, aten.exp, aten.sub, aten.log, aten.div, aten.where]
# Source node to ATen node mapping:
# exp => exp
# gt => gt
# log => log
# mul => mul
# mul_1 => mul_1
# sub => sub
# truediv => div
# where => where
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%exp, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log, 1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %div), kwargs = {})
triton_poi_fused_div_exp_gt_log_mul_sub_where_0 = async_compile.triton('triton_poi_fused_div_exp_gt_log_mul_sub_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_gt_log_mul_sub_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_exp_gt_log_mul_sub_where_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = tmp5 - tmp1
tmp7 = tl_math.log(tmp6)
tmp8 = tmp7 * tmp1
tmp9 = tl.where(tmp4, tmp0, tmp8)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, gt, mul_1, exp, sub, log, truediv, where], Original ATen: [aten.mul, aten.gt, aten.exp, aten.sub, aten.log, aten.div, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_div_exp_gt_log_mul_sub_where_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
def inverseSoftplus(y, beta=1, threshold=20):
"""
inverse of y=torch.nn.functional.softplus(x, beta, threshold)
:param y: the output of the softplus
:param beta: the smoothness of the step
:param threshold: the threshold after which a linear function is used
:return: the input
"""
return torch.where(y * beta > threshold, y, torch.log(torch.exp(beta *
y) - 1) / beta)
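# Sanity-check sketch (an illustrative addition, not in the original source):
# softplus(x) = log(1 + exp(beta*x)) / beta, so exp(beta*y) = 1 + exp(beta*x)
# and x = log(exp(beta*y) - 1) / beta; past the threshold softplus is
# effectively the identity, which the first torch.where branch reproduces.
def _inverse_softplus_roundtrip_check():
    x = torch.linspace(0.5, 5.0, steps=10)
    y = torch.nn.functional.softplus(x)
    assert torch.allclose(inverseSoftplus(y), x, atol=1e-5)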
class InverseSoftplus(torch.nn.Module):
def __init__(self, beta=1, threshold=20):
super().__init__()
self._beta = beta
self._threshold = threshold
def forward(self, y):
return inverseSoftplus(y, self._beta, self._threshold)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_exp_gt_log_mul_sub_where_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = tmp5 - tmp1
tmp7 = tl_math.log(tmp6)
tmp8 = tmp7 * tmp1
tmp9 = tl.where(tmp4, tmp0, tmp8)
tl.store(out_ptr0 + x0, tmp9, xmask)
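# (Added note) The kernel above is inverseSoftplus with beta = 1 and
# threshold = 20 constant-folded: tmp4 tests y * 1.0 > 20.0, tmp8 computes
# log(exp(y) - 1) / 1.0, and tl.where returns y itself on the linear branch.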
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_exp_gt_log_mul_sub_where_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def inverseSoftplus(y, beta=1, threshold=20):
"""
inverse of y=torch.nn.functional.softplus(x, beta, threshold)
:param y: the output of the softplus
:param beta: the smoothness of the step
:param threshold: the threshold after which a linear function is used
:return: the input
"""
return torch.where(y * beta > threshold, y, torch.log(torch.exp(beta *
y) - 1) / beta)
class InverseSoftplusNew(torch.nn.Module):
def __init__(self, beta=1, threshold=20):
super().__init__()
self._beta = beta
self._threshold = threshold
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| khoehlein/fV-SRN | InverseSoftplus | false | 3,832 | [
"MIT"
] | 0 | 601f3e952b090df92e875c233c2c9ca646523948 | https://github.com/khoehlein/fV-SRN/tree/601f3e952b090df92e875c233c2c9ca646523948 | import torch
import torch.utils.data
def inverseSoftplus(y, beta=1, threshold=20):
"""
inverse of y=torch.nn.functional.softplus(x, beta, threshold)
:param y: the output of the softplus
:param beta: the smoothness of the step
:param threshold: the threshold after which a linear function is used
:return: the input
"""
return torch.where(y * beta > threshold, y, torch.log(torch.exp(beta *
y) - 1) / beta)
class Model(torch.nn.Module):
def __init__(self, beta=1, threshold=20):
super().__init__()
self._beta = beta
self._threshold = threshold
def forward(self, y):
return inverseSoftplus(y, self._beta, self._threshold)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/gy/cgyfxbdoyn6aqocgkacshriylcks5c6kt3gfzmwpealzqpvxe2qq.py
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 8
x3 = (xindex // 128)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (32*x1) + (128*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sd/csdzc7ihn23c3cq4w4usapmdgqsgbmeydp7obuzkly4qrdi7ghk5.py
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weights => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nr/cnr2l3l4x36tjfbtrutwg2z6iijamnnvubvppho2khkq56fs453d.py
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weights => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wf/cwfnu7iigx37txvjlhi5dkscdizq3ojcwgdznc7shmckykzg5ba5.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_3 => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 8
x2 = (xindex // 32) % 4
x3 = (xindex // 128)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (128*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (32, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (32, 4), (4, 1))
assert_size_stride(primals_5, (4, 32), (32, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 32), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf1, buf3, 512, grid=grid(512), stream=stream0)
buf4 = reinterpret_tensor(buf1, (4, 8, 4, 4), (128, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf0, buf4, 512, grid=grid(512), stream=stream0)
buf5 = reinterpret_tensor(buf0, (32, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [raw_weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (32, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (32, 4, 4), (16, 1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((32, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf5, buf6, 512, grid=grid(512), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf6, buf7, 512, grid=grid(512), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 8, 4, 4), (128, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, buf8, 512, grid=grid(512), stream=stream0)
buf9 = reinterpret_tensor(buf2, (32, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (32, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf9, buf10, 512, grid=grid(512), stream=stream0)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 32), (32, 1), 0), reinterpret_tensor(primals_5, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf11)
del primals_6
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 32), (32, 1), 0), primals_5, reinterpret_tensor(buf8, (32, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (32, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (32, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
import torch.nn.functional as F
class MultiHeadAttention(nn.Module):
def __init__(self, emb_size, n_heads=8, mask=False):
"""
Arguments:
emb_size: Size of input Embeddings
n_heads: Number of heads for MultiHead Attention
Layers:
tokeys: For Keys
toquery: For Query
tovalue: For Value
combine_heads: Convert the ( (number of heads) * (emb_size) ) into (emb_size) for output
Note:
            The input dimension we give to the Linear layer (nn.Linear) is the last dimension (no. of columns) of the input size:
            (batch_size, seq_length, input_dims) ==> input_dims is what we pass to nn.Linear.
            So for a tensor to pass through a linear layer, its last dim must equal the input_dim of that
            Linear layer.
"""
super(MultiHeadAttention, self).__init__()
self.emb_size = emb_size
self.n_heads = n_heads
"""
Explanation Comment:
        For single-head attention the input and the output are both of emb_size, i.e. Linear(emb_size, emb_size), but in multi-head attention
        there are n_heads heads and each head has its own key (k), query (q) and value (v), so the total number of k, q, v
        projections equals n_heads.
        We can compute all of them in one linear layer by setting the output size to n_heads * emb_size (here emb_size = per-head output size).
        Same for k, q, v: nn.Linear(emb_size, n_heads * emb_size)
"""
self.tokeys = nn.Linear(emb_size, n_heads * emb_size, bias=False)
self.toquery = nn.Linear(emb_size, n_heads * emb_size, bias=False)
self.tovalue = nn.Linear(emb_size, n_heads * emb_size, bias=False)
self.combine_heads = nn.Linear(n_heads * emb_size, emb_size)
self.Attnweights = None
self.mask = mask
def __MatrixMask(self, matrix, mask_value=float('-inf'), diagonal=False):
"""
        In self-attention a mask is used when we are trying to predict the next word based on the previous sequence,
        but attention inherently contains information about all the words in the sequence.
        In order to accurately predict the next word without cheating by looking ahead with attention, we set all
        the attention weights of the following words/tokens to -inf or zero, so that there is no input from the sequence ahead
        of the current word
[ 1 2 3 [ 1 0 0
4 5 6 =========> 4 5 0
7 8 9 ] 7 8 9 ]
"""
row, column = matrix.size(-2), matrix.size(-1)
offset = 0 if diagonal else 1
upper_triangular_idx = torch.triu_indices(row, column, offset)
matrix[..., upper_triangular_idx[0], upper_triangular_idx[1]
] = mask_value
return matrix
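    # Illustration (added comment): for a 3x3 score matrix with diagonal=False,
    # torch.triu_indices(3, 3, 1) selects the strictly-upper entries
    # (0,1), (0,2), (1,2); writing -inf there makes softmax assign them zero
    # weight, so position i attends only to positions <= i.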
def forward(self, x):
"""
Argument:
            x: should be of shape [batch, sequence_length, emb_size]
        Local variables:
b: batch size
t: length of sequence
e: embedding size of each individual input token/word
h: number of heads
"""
b, t, e = x.size()
h = self.n_heads
assert e == self.emb_size, f'Input size expected to be {self.emb_size} but got {e}'
keys = self.tokeys(x).view(b, t, h, e)
query = self.toquery(x).view(b, t, h, e)
value = self.tovalue(x).view(b, t, h, e)
"""
        PyTorch's contiguous() is used so that indices which are logically side by side are also side by side in memory (a prerequisite for the view() calls below)
"""
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
query = query.transpose(1, 2).contiguous().view(b * h, t, e)
value = value.transpose(1, 2).contiguous().view(b * h, t, e)
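        # (Added shape note) keys/query/value are now (b*h, t, e): the b*h
        # per-head sequences are attended independently by the bmm calls below.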
raw_weights = torch.bmm(query, keys.transpose(1, 2))
raw_weights /= math.sqrt(e)
assert raw_weights.size() == (b * h, t, t
), f'expected shape {b * h, t, t} got {raw_weights.size()}'
if self.mask:
raw_weights = self.__MatrixMask(raw_weights)
weights = F.softmax(raw_weights, dim=2)
self.Attnweights = weights
out = torch.bmm(weights, value).view(b, h, t, e)
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
out = self.combine_heads(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'emb_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 8
x3 = xindex // 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 32 * x1 + 128 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
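# (Added note) clone_0 above materializes the (b, t, h, e) -> (b, h, t, e)
# transpose as a contiguous copy: the read offset x0 + 4*x2 + 32*x1 + 128*x3
# uses the input strides (e:1, h:4, t:32, b:128) while the write is linear.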
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
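# (Added note) Kernels 1 and 2 above implement a numerically stable softmax
# over the last dim of the (32, 4, 4) score tensor: kernel 1 computes
# exp((x - rowmax) * 0.5), with 0.5 = 1/sqrt(e) for e = 4 (the attention
# scale folded into the softmax), and kernel 2 normalizes by the row sum.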
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 8
x2 = xindex // 32 % 4
x3 = xindex // 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 128 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (32, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (32, 4), (4, 1))
assert_size_stride(primals_5, (4, 32), (32, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 32), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(512)](buf1, buf3, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf1, (4, 8, 4, 4), (128, 16, 4, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(512)](buf0, buf4, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf0, (32, 4, 4), (16, 4, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(buf3, (32, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (32, 4, 4), (16, 1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((32, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(512)](buf5, buf6, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_2[grid(512)](buf6, buf7, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 8, 4, 4), (128, 16, 4, 1), 0)
del buf6
triton_poi_fused_clone_0[grid(512)](buf2, buf8, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (32, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (32, 4, 4), (16,
4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32
)
triton_poi_fused_clone_3[grid(512)](buf9, buf10, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 32),
(32, 1), 0), reinterpret_tensor(primals_5, (32, 4), (1, 32), 0),
alpha=1, beta=1, out=buf11)
del primals_6
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 32), (32, 1), 0
), primals_5, reinterpret_tensor(buf8, (32, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (32, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (32, 4, 4), (16, 4, 1), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, emb_size, n_heads=8, mask=False):
"""
Arguments:
emb_size: Size of input Embeddings
n_heads: Number of heads for MultiHead Attention
Layers:
tokeys: For Keys
toquery: For Query
tovalue: For Value
combine_heads: Convert the ( (number of heads) * (emb_size) ) into (emb_size) for output
Note:
            The input dimension we give to the Linear layer (nn.Linear) is the last dimension (no. of columns) of the input size:
            (batch_size, seq_length, input_dims) ==> input_dims is what we pass to nn.Linear.
            So for a tensor to pass through a linear layer, its last dim must equal the input_dim of that
            Linear layer.
"""
super(MultiHeadAttentionNew, self).__init__()
self.emb_size = emb_size
self.n_heads = n_heads
"""
Explanation Comment:
        For single-head attention the input and the output are both of emb_size, i.e. Linear(emb_size, emb_size), but in multi-head attention
        there are n_heads heads and each head has its own key (k), query (q) and value (v), so the total number of k, q, v
        projections equals n_heads.
        We can compute all of them in one linear layer by setting the output size to n_heads * emb_size (here emb_size = per-head output size).
        Same for k, q, v: nn.Linear(emb_size, n_heads * emb_size)
"""
self.tokeys = nn.Linear(emb_size, n_heads * emb_size, bias=False)
self.toquery = nn.Linear(emb_size, n_heads * emb_size, bias=False)
self.tovalue = nn.Linear(emb_size, n_heads * emb_size, bias=False)
self.combine_heads = nn.Linear(n_heads * emb_size, emb_size)
self.Attnweights = None
self.mask = mask
def __MatrixMask(self, matrix, mask_value=float('-inf'), diagonal=False):
"""
        In self-attention a mask is used when we are trying to predict the next word based on the previous sequence,
        but attention inherently contains information about all the words in the sequence.
        In order to accurately predict the next word without cheating by looking ahead with attention, we set all
        the attention weights of the following words/tokens to -inf or zero, so that there is no input from the sequence ahead
        of the current word
[ 1 2 3 [ 1 0 0
4 5 6 =========> 4 5 0
7 8 9 ] 7 8 9 ]
"""
row, column = matrix.size(-2), matrix.size(-1)
offset = 0 if diagonal else 1
upper_triangular_idx = torch.triu_indices(row, column, offset)
matrix[..., upper_triangular_idx[0], upper_triangular_idx[1]
] = mask_value
return matrix
def forward(self, input_0):
primals_2 = self.tokeys.weight
primals_3 = self.toquery.weight
primals_4 = self.tovalue.weight
primals_5 = self.combine_heads.weight
primals_6 = self.combine_heads.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| kcmankar/TransformerFromScratch | MultiHeadAttention | false | 3,833 | [
"MIT"
] | 0 | 4c68d507f3b0b9713822964e3769283ca0ddc685 | https://github.com/kcmankar/TransformerFromScratch/tree/4c68d507f3b0b9713822964e3769283ca0ddc685 | import math
import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, emb_size, n_heads=8, mask=False):
"""
Arguments:
emb_size: Size of input Embeddings
n_heads: Number of heads for MultiHead Attention
Layers:
tokeys: For Keys
toquery: For Query
tovalue: For Value
combine_heads: Convert the ( (number of heads) * (emb_size) ) into (emb_size) for output
Note:
            The input dimension we give to the Linear Layer (nn.Linear) is the last dimension (number of columns) of the input:
            for a tensor of shape (batch_size, seq_length, input_dims), input_dims is what we pass to nn.Linear.
            So for a tensor to pass through a linear layer, its last dimension must equal the Linear
            layer's input dimension.
"""
super().__init__()
self.emb_size = emb_size
self.n_heads = n_heads
"""
Explanation Comment:
        For single-head attention the input and output are both of size emb_size, i.e. Linear(emb_size, emb_size), but in multi-head attention
        there are n_heads heads and each head has its own key (k), query (q) and value (v), so the total number of k, q, v
        projections equals n_heads.
        We can compute all of them in a single linear layer by setting the output size to n_heads * emb_size (here emb_size = per-head output size).
        The same holds for k, q and v: nn.Linear(emb_size, n_heads * emb_size)
"""
self.tokeys = nn.Linear(emb_size, n_heads * emb_size, bias=False)
self.toquery = nn.Linear(emb_size, n_heads * emb_size, bias=False)
self.tovalue = nn.Linear(emb_size, n_heads * emb_size, bias=False)
self.combine_heads = nn.Linear(n_heads * emb_size, emb_size)
self.Attnweights = None
self.mask = mask
def __MatrixMask(self, matrix, mask_value=float('-inf'), diagonal=False):
"""
        In self-attention a mask is used when we are trying to predict the next word based on the previous sequence,
        but attention inherently contains information about all the words in the sequence.
        In order to accurately predict the next word, without cheating by looking ahead with attention, we set all
        the attention weights of the following words/tokens to -inf or zero, so that there is no input from the part
        of the sequence ahead of the current word
[ 1 2 3 [ 1 0 0
4 5 6 =========> 4 5 0
7 8 9 ] 7 8 9 ]
"""
row, column = matrix.size(-2), matrix.size(-1)
offset = 0 if diagonal else 1
upper_triangular_idx = torch.triu_indices(row, column, offset)
matrix[..., upper_triangular_idx[0], upper_triangular_idx[1]
] = mask_value
return matrix
def forward(self, x):
"""
Argument:
            x: should be of shape [batch, sequence_length, emb_size]
Local variable:
b: batch size
t: length of sequence
e: embedding size of each individual input token/word
h: number of heads
"""
b, t, e = x.size()
h = self.n_heads
assert e == self.emb_size, f'Input size expected to be {self.emb_size} but got {e}'
keys = self.tokeys(x).view(b, t, h, e)
query = self.toquery(x).view(b, t, h, e)
value = self.tovalue(x).view(b, t, h, e)
"""
        PyTorch's contiguous() is used so that elements that are adjacent by index are also adjacent in memory after the transpose
"""
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
query = query.transpose(1, 2).contiguous().view(b * h, t, e)
value = value.transpose(1, 2).contiguous().view(b * h, t, e)
raw_weights = torch.bmm(query, keys.transpose(1, 2))
raw_weights /= math.sqrt(e)
assert raw_weights.size() == (b * h, t, t
), f'expected shape {b * h, t,
# ... truncated (>4000 chars) for memory efficiency |
decoder3 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/24/c24cft5766glssqi3nl3jamflubhwo6cifxhbmngzrlssnsny2d4.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# out => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 6) % 6
x0 = xindex % 6
x2 = (xindex // 36)
x4 = xindex
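    # x0/x1 are coordinates in the padded 6x6 output; tmp0..tmp10 build the
    # in-bounds mask for the original 4x4 input, implementing
    # ZeroPad2d((1, 1, 1, 1)) as a masked load with other=0.0.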
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10, other=0.0)
tl.store(out_ptr0 + (x4), tmp11, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kk/ckkeehad7xvjmxduxmwzjdm4zu3f5inttmd6kj4pju55xhwrnoea.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# out_3 => add, add_1, convert_element_type, convert_element_type_1, iota, mul, mul_1
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_1 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
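    # Nearest-neighbor 2x upsampling index map: output position i reads input
    # position floor(i * 0.5), computed as a float multiply then int truncation.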
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6p/c6pcnr6haywitt75cdo5ocyrs5wn7f5ybaeca5sgtfqhj7zpya72.py
# Topologically Sorted Source Nodes: [out_1, out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.constant_pad_nd]
# Source node to ATen node mapping:
# out_1 => convolution
# out_2 => relu
# out_3 => _unsafe_index
# out_4 => constant_pad_nd_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%_unsafe_index, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_2 = async_compile.triton('triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 204800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 10) % 10
x0 = xindex % 10
x4 = (xindex // 100)
x2 = (xindex // 100) % 512
x7 = xindex
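    # Fuses bias add, ReLU, nearest-neighbor 2x upsampling (via the
    # precomputed index map in in_ptr0), and ZeroPad2d into one pass over the
    # padded 10x10 output.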
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-1) + x1), tmp10, eviction_policy='evict_last', other=0.0)
tmp12 = tl.full([XBLOCK], 4, tl.int32)
tmp13 = tmp11 + tmp12
tmp14 = tmp11 < 0
tmp15 = tl.where(tmp14, tmp13, tmp11)
tmp16 = tl.load(in_ptr0 + ((-1) + x0), tmp10, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr1 + (tmp19 + (4*tmp15) + (16*x4)), tmp10, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr2 + (x2), tmp10, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.full([1], 0, tl.int32)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp10, tmp24, tmp25)
tl.store(out_ptr0 + (x7), tmp26, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2q/c2qwr3ogotr4dmpj572s4j5gzks4b5bmiqaev4sffuruoxf237c4.py
# Topologically Sorted Source Nodes: [out_5, out_6, out_7], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# out_5 => convolution_1
# out_6 => relu_1
# out_7 => constant_pad_nd_2
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu_1, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_relu_3 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 204800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 10) % 10
x0 = xindex % 10
x4 = (xindex // 100)
x2 = (xindex // 100) % 512
x6 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-9) + x0 + (8*x1) + (64*x4)), tmp10, other=0.0)
tmp12 = tl.load(in_ptr1 + (x2), tmp10, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + (x6), tmp17, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7n/c7nyyvchtptp5rbqvks7cavg63gajcswx5tpkvypdmc7ocd2zaaz.py
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# out_10 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_2, mul_4, mul_5
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/y4/cy4sf7e6wj4mhqc65cpqsghygal5cptd3bmph37zmihjqh5p7twg.py
# Topologically Sorted Source Nodes: [out_8, out_9, out_10, out_11], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.constant_pad_nd]
# Source node to ATen node mapping:
# out_10 => _unsafe_index_1
# out_11 => constant_pad_nd_3
# out_8 => convolution_2
# out_9 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_2, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {})
# %constant_pad_nd_3 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%_unsafe_index_1, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_5 = async_compile.triton('triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 331776
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 18) % 18
x0 = xindex % 18
x4 = (xindex // 324)
x2 = (xindex // 324) % 256
x7 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-1) + x1), tmp10, eviction_policy='evict_last', other=0.0)
tmp12 = tl.full([XBLOCK], 8, tl.int32)
tmp13 = tmp11 + tmp12
tmp14 = tmp11 < 0
tmp15 = tl.where(tmp14, tmp13, tmp11)
tmp16 = tl.load(in_ptr0 + ((-1) + x0), tmp10, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr1 + (tmp19 + (8*tmp15) + (64*x4)), tmp10, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr2 + (x2), tmp10, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.full([1], 0, tl.int32)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp10, tmp24, tmp25)
tl.store(out_ptr0 + (x7), tmp26, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jd/cjd5tvhgf5atwgncp5jfzwc4f6d37fe6c72za6gapozbt4qxkgbj.py
# Topologically Sorted Source Nodes: [out_12, out_13, out_14], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# out_12 => convolution_3
# out_13 => relu_3
# out_14 => constant_pad_nd_4
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_3, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %constant_pad_nd_4 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu_3, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_relu_6 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 41472
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 18) % 18
x0 = xindex % 18
x4 = (xindex // 324)
x2 = (xindex // 324) % 32
x6 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-17) + x0 + (16*x1) + (256*x4)), tmp10 & xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + (x2), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + (x6), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mr/cmrm4ykglutda4amq2nrfeaf6rom3vrpet2lshx4jfysv3ti4tug.py
# Topologically Sorted Source Nodes: [out_15, clamp, out_16], Original ATen: [aten.convolution, aten.clamp, aten.mul, aten.ge, aten.le, aten.logical_and]
# Source node to ATen node mapping:
# clamp => clamp_max, clamp_min
# out_15 => convolution_4
# out_16 => mul_8
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_4, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%convolution_4, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 255), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%convolution_4, 0), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%convolution_4, 1), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {})
triton_poi_fused_clamp_convolution_ge_le_logical_and_mul_7 = async_compile.triton('triton_poi_fused_clamp_convolution_ge_le_logical_and_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_convolution_ge_le_logical_and_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_convolution_ge_le_logical_and_mul_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 256) % 3
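    # Adds the conv bias, computes out.clamp(0, 1) * 255 for the final image,
    # and records the (0 <= out <= 1) boolean mask that clamp's backward needs.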
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 1.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 255.0
tmp8 = tmp6 * tmp7
tmp9 = tmp2 >= tmp3
tmp10 = tmp2 <= tmp5
tmp11 = tmp9 & tmp10
tl.store(out_ptr0 + (x3), tmp8, xmask)
tl.store(out_ptr1 + (x3), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vc/cvcrvlqhpiwwtiswor3lenxbvgjdzja5sdckr7x5idxwr6uq7c3m.py
# Topologically Sorted Source Nodes: [out_12, out_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_12 => convolution_3
# out_13 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_3, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 32
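    # Recomputes conv output + bias + ReLU and stores the boolean mask of
    # inactive units (activation <= 0) consumed by ReLU's threshold_backward.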
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sb/csbbpgymg3xtjomfpavjmecnsnjkg47yz3lgdjmycwplrvipirey.py
# Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_8 => convolution_2
# out_9 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hw/chwkahplvub2epl3duf3up4j65qfnsz4kmowwjs3ssv7xfxyyuhg.py
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_5 => convolution_1
# out_6 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qg/cqg5mlre6l5grgdof2o2xiy7poq62urxvxd6l67ttvxx3u4ukdgf.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => convolution
# out_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 1024, 4, 4), (16384, 16, 4, 1))
assert_size_stride(primals_2, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_3, (512, ), (1, ))
assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (32, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (3, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024, 6, 6), (36864, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 147456, grid=grid(147456), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 512, 4, 4), (8192, 16, 4, 1))
buf2 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_1.run(buf2, 8, grid=grid(8), stream=stream0)
buf3 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.constant_pad_nd]
triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_2.run(buf2, buf1, primals_3, buf3, 204800, grid=grid(204800), stream=stream0)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 512, 8, 8), (32768, 64, 8, 1))
buf5 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_5, out_6, out_7], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_3.run(buf4, primals_5, buf5, 204800, grid=grid(204800), stream=stream0)
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1))
buf7 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_4.run(buf7, 16, grid=grid(16), stream=stream0)
buf8 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_8, out_9, out_10, out_11], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.constant_pad_nd]
triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_5.run(buf7, buf6, primals_7, buf8, 331776, grid=grid(331776), stream=stream0)
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 16, 16), (8192, 256, 16, 1))
buf10 = empty_strided_cuda((4, 32, 18, 18), (10368, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_12, out_13, out_14], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_6.run(buf9, primals_9, buf10, 41472, grid=grid(41472), stream=stream0)
# Topologically Sorted Source Nodes: [out_15], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 3, 16, 16), (768, 256, 16, 1))
buf12 = empty_strided_cuda((4, 3, 16, 16), (768, 256, 16, 1), torch.float32)
buf13 = empty_strided_cuda((4, 3, 16, 16), (768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_15, clamp, out_16], Original ATen: [aten.convolution, aten.clamp, aten.mul, aten.ge, aten.le, aten.logical_and]
triton_poi_fused_clamp_convolution_ge_le_logical_and_mul_7.run(buf11, primals_11, buf12, buf13, 3072, grid=grid(3072), stream=stream0)
del buf11
del primals_11
buf14 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_12, out_13], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf9, primals_9, buf14, 32768, grid=grid(32768), stream=stream0)
del buf9
del primals_9
buf15 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_9.run(buf6, primals_7, buf15, 65536, grid=grid(65536), stream=stream0)
del buf6
del primals_7
buf16 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_10.run(buf4, primals_5, buf16, 131072, grid=grid(131072), stream=stream0)
del buf4
del primals_5
buf17 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_11.run(buf1, primals_3, buf17, 32768, grid=grid(32768), stream=stream0)
del buf1
del primals_3
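    # buf12 is the decoder output (out.clamp(0, 1) * 255); the remaining
    # tensors are weights, activations and masks retained for the backward pass.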
return (buf12, primals_2, primals_4, primals_6, primals_8, primals_10, buf0, buf2, buf3, buf5, buf7, buf8, buf10, buf13, buf14, buf15, buf16, buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1024, 4, 4), (16384, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((3, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
import torch
import torch.nn as nn
class decoder3(nn.Module):
def __init__(self, W, v2):
super(decoder3, self).__init__()
self.reflecPad7 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv7 = nn.Conv2d(int(256 * W), int(128 * W), 3, 1, 0)
self.relu7 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad8 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv8 = nn.Conv2d(int(128 * W), int(128 * W), 3, 1, 0)
self.relu8 = nn.ReLU(inplace=True)
self.reflecPad9 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv9 = nn.Conv2d(int(128 * W), int(64 * W), 3, 1, 0)
self.relu9 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad10 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv10 = nn.Conv2d(int(64 * W), 32 if v2 else int(64 * W), 3, 1, 0
)
self.relu10 = nn.ReLU(inplace=True)
self.reflecPad11 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(32 if v2 else int(64 * W), 3, 3, 1, 0)
def forward(self, x):
out = self.reflecPad7(x)
out = self.conv7(out)
out = self.relu7(out)
out = self.unpool(out)
out = self.reflecPad8(out)
out = self.conv8(out)
out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
out = self.relu9(out)
out = self.unpool2(out)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
out = self.reflecPad11(out)
out = self.conv11(out)
out = out.clamp(0, 1) * 255
return out
def get_inputs():
return [torch.rand([4, 1024, 4, 4])]
def get_init_inputs():
return [[], {'W': 4, 'v2': 4}]
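# Minimal smoke test (a sketch; shapes follow get_inputs/get_init_inputs):
#   net = decoder3(W=4, v2=4)
#   y = net(torch.rand(4, 1024, 4, 4))  # -> (4, 3, 16, 16), values in [0, 255]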
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10, other=0.0)
tl.store(out_ptr0 + x4, tmp11, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 10 % 10
x0 = xindex % 10
x4 = xindex // 100
x2 = xindex // 100 % 512
x7 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-1 + x1), tmp10, eviction_policy=
'evict_last', other=0.0)
tmp12 = tl.full([XBLOCK], 4, tl.int32)
tmp13 = tmp11 + tmp12
tmp14 = tmp11 < 0
tmp15 = tl.where(tmp14, tmp13, tmp11)
tmp16 = tl.load(in_ptr0 + (-1 + x0), tmp10, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr1 + (tmp19 + 4 * tmp15 + 16 * x4), tmp10,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr2 + x2, tmp10, eviction_policy='evict_last',
other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.full([1], 0, tl.int32)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp10, tmp24, tmp25)
tl.store(out_ptr0 + x7, tmp26, None)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 10 % 10
x0 = xindex % 10
x4 = xindex // 100
x2 = xindex // 100 % 512
x6 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-9 + x0 + 8 * x1 + 64 * x4), tmp10, other=0.0)
tmp12 = tl.load(in_ptr1 + x2, tmp10, eviction_policy='evict_last',
other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + x6, tmp17, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_5(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 18 % 18
x0 = xindex % 18
x4 = xindex // 324
x2 = xindex // 324 % 256
x7 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-1 + x1), tmp10, eviction_policy=
'evict_last', other=0.0)
tmp12 = tl.full([XBLOCK], 8, tl.int32)
tmp13 = tmp11 + tmp12
tmp14 = tmp11 < 0
tmp15 = tl.where(tmp14, tmp13, tmp11)
tmp16 = tl.load(in_ptr0 + (-1 + x0), tmp10, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr1 + (tmp19 + 8 * tmp15 + 64 * x4), tmp10,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr2 + x2, tmp10, eviction_policy='evict_last',
other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.full([1], 0, tl.int32)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp10, tmp24, tmp25)
tl.store(out_ptr0 + x7, tmp26, None)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 41472
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 18 % 18
x0 = xindex % 18
x4 = xindex // 324
x2 = xindex // 324 % 32
x6 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-17 + x0 + 16 * x1 + 256 * x4), tmp10 &
xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + x2, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + x6, tmp17, xmask)
@triton.jit
def triton_poi_fused_clamp_convolution_ge_le_logical_and_mul_7(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 256 % 3
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 1.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 255.0
tmp8 = tmp6 * tmp7
tmp9 = tmp2 >= tmp3
tmp10 = tmp2 <= tmp5
tmp11 = tmp9 & tmp10
tl.store(out_ptr0 + x3, tmp8, xmask)
tl.store(out_ptr1 + x3, tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
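# Editorial note (hedged): the *_threshold_backward kernels above precompute
# the ReLU masks (activation <= 0) into fresh bool buffers for autograd's
# backward pass; they never mutate the forward buffers.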
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1024, 4, 4), (16384, 16, 4, 1))
assert_size_stride(primals_2, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_3, (512,), (1,))
assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (32, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (3, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024, 6, 6), (36864, 36, 6, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(147456)](primals_1, buf0,
147456, XBLOCK=512, num_warps=8, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 512, 4, 4), (8192, 16, 4, 1))
buf2 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK
=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1),
torch.float32)
triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_2[grid
(204800)](buf2, buf1, primals_3, buf3, 204800, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 512, 8, 8), (32768, 64, 8, 1))
buf5 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1),
torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_3[grid(204800)](buf4,
primals_5, buf5, 204800, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1))
buf7 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1),
torch.float32)
triton_poi_fused__unsafe_index_constant_pad_nd_convolution_relu_5[grid
(331776)](buf7, buf6, primals_7, buf8, 331776, XBLOCK=512,
num_warps=8, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 16, 16), (8192, 256, 16, 1))
buf10 = empty_strided_cuda((4, 32, 18, 18), (10368, 324, 18, 1),
torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_6[grid(41472)](buf9,
primals_9, buf10, 41472, XBLOCK=256, num_warps=4, num_stages=1)
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 3, 16, 16), (768, 256, 16, 1))
buf12 = empty_strided_cuda((4, 3, 16, 16), (768, 256, 16, 1), torch
.float32)
buf13 = empty_strided_cuda((4, 3, 16, 16), (768, 256, 16, 1), torch
.bool)
triton_poi_fused_clamp_convolution_ge_le_logical_and_mul_7[grid(3072)](
buf11, primals_11, buf12, buf13, 3072, XBLOCK=128, num_warps=4,
num_stages=1)
del buf11
del primals_11
buf14 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(32768)](
buf9, primals_9, buf14, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del buf9
del primals_9
buf15 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(65536)](
buf6, primals_7, buf15, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf16 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_10[grid(131072)](
buf4, primals_5, buf16, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del buf4
del primals_5
buf17 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(32768)](
buf1, primals_3, buf17, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del primals_3
return (buf12, primals_2, primals_4, primals_6, primals_8, primals_10,
buf0, buf2, buf3, buf5, buf7, buf8, buf10, buf13, buf14, buf15,
buf16, buf17)
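def _decoder3_call_smoke_test():
    # Hedged editorial helper (ours, not emitted by Inductor): drives `call`
    # with random tensors matching the assert_size_stride contracts above.
    # Assumes a CUDA device; returns the clamped (4, 3, 16, 16) image buffer.
    dev = 'cuda'
    args = [torch.rand(4, 1024, 4, 4, device=dev),
        torch.rand(512, 1024, 3, 3, device=dev), torch.rand(512, device=dev),
        torch.rand(512, 512, 3, 3, device=dev), torch.rand(512, device=dev),
        torch.rand(256, 512, 3, 3, device=dev), torch.rand(256, device=dev),
        torch.rand(32, 256, 3, 3, device=dev), torch.rand(32, device=dev),
        torch.rand(3, 32, 3, 3, device=dev), torch.rand(3, device=dev)]
    return call(args)[0]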
class decoder3New(nn.Module):
def __init__(self, W, v2):
super(decoder3New, self).__init__()
self.reflecPad7 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv7 = nn.Conv2d(int(256 * W), int(128 * W), 3, 1, 0)
self.relu7 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad8 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv8 = nn.Conv2d(int(128 * W), int(128 * W), 3, 1, 0)
self.relu8 = nn.ReLU(inplace=True)
self.reflecPad9 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv9 = nn.Conv2d(int(128 * W), int(64 * W), 3, 1, 0)
self.relu9 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad10 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv10 = nn.Conv2d(int(64 * W), 32 if v2 else int(64 * W), 3, 1, 0
)
self.relu10 = nn.ReLU(inplace=True)
self.reflecPad11 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(32 if v2 else int(64 * W), 3, 3, 1, 0)
def forward(self, input_0):
primals_2 = self.conv7.weight
primals_3 = self.conv7.bias
primals_4 = self.conv8.weight
primals_5 = self.conv8.bias
primals_6 = self.conv9.weight
primals_7 = self.conv9.bias
primals_8 = self.conv10.weight
primals_9 = self.conv10.bias
primals_10 = self.conv11.weight
primals_11 = self.conv11.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| kamieen03/style-transfer-server | decoder3 | false | 3,834 | [
"BSD-2-Clause"
] | 0 | 91727ec62080215a0b870ce043faf0657137b84b | https://github.com/kamieen03/style-transfer-server/tree/91727ec62080215a0b870ce043faf0657137b84b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, W, v2):
super().__init__()
self.reflecPad7 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv7 = nn.Conv2d(int(256 * W), int(128 * W), 3, 1, 0)
self.relu7 = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad8 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv8 = nn.Conv2d(int(128 * W), int(128 * W), 3, 1, 0)
self.relu8 = nn.ReLU(inplace=True)
self.reflecPad9 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv9 = nn.Conv2d(int(128 * W), int(64 * W), 3, 1, 0)
self.relu9 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
self.reflecPad10 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv10 = nn.Conv2d(int(64 * W), 32 if v2 else int(64 * W), 3, 1, 0
)
self.relu10 = nn.ReLU(inplace=True)
self.reflecPad11 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv11 = nn.Conv2d(32 if v2 else int(64 * W), 3, 3, 1, 0)
def forward(self, x):
out = self.reflecPad7(x)
out = self.conv7(out)
out = self.relu7(out)
out = self.unpool(out)
out = self.reflecPad8(out)
out = self.conv8(out)
out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
out = self.relu9(out)
out = self.unpool2(out)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
out = self.reflecPad11(out)
out = self.conv11(out)
out = out.clamp(0, 1) * 255
return out
def get_inputs():
return [torch.rand([4, 1024, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LReluCustom | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ac/cacxtgnro7ndp6nwnutsryio2ao6xnvb3lamty4ptfdbos73qfys.py
# Topologically Sorted Source Nodes: [mul, max_1], Original ATen: [aten.mul, aten.maximum]
# Source node to ATen node mapping:
# max_1 => maximum
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.1), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%arg0_1, %mul), kwargs = {})
triton_poi_fused_maximum_mul_0 = async_compile.triton('triton_poi_fused_maximum_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_maximum_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.1
tmp2 = tmp0 * tmp1
tmp3 = triton_helpers.maximum(tmp0, tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, max_1], Original ATen: [aten.mul, aten.maximum]
stream0 = get_raw_stream(0)
triton_poi_fused_maximum_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class LReluCustom(nn.Module):
def __init__(self, leak=0.1):
super(LReluCustom, self).__init__()
self.leak = leak
def forward(self, x):
return torch.max(x, self.leak * x)
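# Hedged editorial check (ours, not part of the original row): torch.max(x,
# leak * x) matches F.leaky_relu(x, leak) for 0 <= leak <= 1 -- for x >= 0 the
# max picks x, for x < 0 it picks leak * x.
import torch.nn.functional as F
def _lrelu_custom_equivalence_check(leak: float = 0.1) -> None:
    x = torch.randn(4, 4, 4, 4)
    assert torch.allclose(LReluCustom(leak)(x), F.leaky_relu(x, leak))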
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_maximum_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.1
tmp2 = tmp0 * tmp1
tmp3 = triton_helpers.maximum(tmp0, tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
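# Editorial note (hedged): grid(256) with XBLOCK=128 launches two programs,
# each covering a contiguous 128-element slice; xmask guards the tail, which
# is empty here since 256 = 2 * 128.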
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_maximum_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LReluCustomNew(nn.Module):
def __init__(self, leak=0.1):
super(LReluCustomNew, self).__init__()
self.leak = leak
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| kkulczak/phrases_reconstruction_GAN | LReluCustom | false | 3,835 | [
"MIT"
] | 0 | 5cf273416eb714f813a8d603942a442f0933cbff | https://github.com/kkulczak/phrases_reconstruction_GAN/tree/5cf273416eb714f813a8d603942a442f0933cbff | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, leak=0.1):
super().__init__()
self.leak = leak
def forward(self, x):
return torch.max(x, self.leak * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TripletLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wl/cwlk66iilteraigfupgubhpxy3pcmh4eoclrhow5zivesmao3jbn.py
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
# Source node to ATen node mapping:
# add => add
# distance_negative => sum_2
# distance_positive => sum_1
# losses => relu
# mean => mean
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg2_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 2.0), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu,), kwargs = {})
triton_per_fused_add_mean_pow_relu_sub_sum_0 = async_compile.triton('triton_per_fused_add_mean_pow_relu_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_relu_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp19 = tl.load(in_ptr2 + (r0 + (64*r1)), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 2.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, distance_positive, sub_1, pow_2, distance_negative, sub_2, add, losses, mean], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.relu, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class TripletLoss(nn.Module):
"""
    Triplet loss.
    Takes embeddings of an anchor sample, a positive sample and a negative
    sample, and pushes the negative at least `margin` farther (in squared
    L2 distance) from the anchor than the positive.
"""
def __init__(self, margin=2.0):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = (anchor - positive).pow(2).sum(1)
distance_negative = (anchor - negative).pow(2).sum(1)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()
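# Hedged editorial example (ours, not from the source repo): with margin = 2.0
# a triplet costs nothing once ||a - n||^2 >= ||a - p||^2 + 2.0; below,
# d_pos = 0 and d_neg = 4 * 3^2 = 36, so the mean loss is exactly 0.
def _triplet_loss_demo():
    loss_fn = TripletLoss(margin=2.0)
    a = torch.zeros(2, 4)
    p = torch.zeros(2, 4)
    n = torch.full((2, 4), 3.0)
    return loss_fn(a, p, n)  # tensor(0.)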
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_pow_relu_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp30 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = tmp0 - tmp19
tmp21 = tmp20 * tmp20
tmp23 = tmp4 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp9 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp25 + tmp28
tmp31 = tmp14 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp29 + tmp32
tmp34 = tmp18 - tmp33
tmp35 = 2.0
tmp36 = tmp34 + tmp35
tmp37 = tl.full([1, 1], 0, tl.int32)
tmp38 = triton_helpers.maximum(tmp37, tmp36)
tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp41 = tl.sum(tmp39, 1)[:, None]
tmp42 = 64.0
tmp43 = tmp41 / tmp42
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None)
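# Editorial note (hedged): the four loads at offsets 0/16/32/48 unroll the
# dim-1 sum over the 4 channels (stride 16); tl.sum then reduces the remaining
# 4 * 16 = 64 positions and the division by 64.0 realizes losses.mean().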
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mean_pow_relu_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class TripletLossNew(nn.Module):
"""
    Triplet loss.
    Takes embeddings of an anchor sample, a positive sample and a negative
    sample, and pushes the negative at least `margin` farther (in squared
    L2 distance) from the anchor than the positive.
"""
def __init__(self, margin=2.0):
super(TripletLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| kmi-robots/object_reasoner | TripletLoss | false | 3,836 | [
"Apache-2.0"
] | 0 | 2d45bdb3ee745e0d866a152e8d81cbb375fa2985 | https://github.com/kmi-robots/object_reasoner/tree/2d45bdb3ee745e0d866a152e8d81cbb375fa2985 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
"""
    Triplet loss.
    Takes embeddings of an anchor sample, a positive sample and a negative
    sample, and pushes the negative at least `margin` farther (in squared
    L2 distance) from the anchor than the positive.
"""
def __init__(self, margin=2.0):
super().__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average=True):
distance_positive = (anchor - positive).pow(2).sum(1)
distance_negative = (anchor - negative).pow(2).sum(1)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean() if size_average else losses.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# h_1 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2y/c2y2depfxa2z6x54ydtq4sztgglwdvphffk5xy2ad6meezomjm6h.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.pow]
# Source node to ATen node mapping:
# out => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_7, 2), kwargs = {})
triton_poi_fused_pow_1 = async_compile.triton('triton_poi_fused_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.pow]
triton_poi_fused_pow_1.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
return (buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, buf6, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class Net(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super(Net, self).__init__()
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
self.layer3 = torch.nn.Linear(n_hidden, n_hidden)
self.layer4 = torch.nn.Linear(n_hidden, n_output)
def forward(self, x):
        tanh = torch.nn.Tanh()
        h_1 = tanh(self.layer1(x))
        h_2 = tanh(self.layer2(h_1))
        h_3 = tanh(self.layer3(h_2))
out = self.layer4(h_3) ** 2
return out
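# Hedged editorial note (ours, not from the source repo): squaring the final
# linear layer keeps the output non-negative regardless of the weights. The
# helper below only verifies shapes under the get_init_inputs sizes.
def _net_shape_check():
    net = Net(n_input=4, n_hidden=4, n_output=4)
    return net(torch.rand(4, 4, 4, 4)).shape  # torch.Size([4, 4, 4, 4])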
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_input': 4, 'n_hidden': 4, 'n_output': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
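# Editorial note (hedged): the squaring from `self.layer4(h_3) ** 2` appears
# to land in its own pointwise kernel because the preceding addmm is
# dispatched to cuBLAS via extern_kernels.addmm, which does not absorb the
# elementwise epilogue here.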
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_0[grid(256)](buf5, primals_7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_pow_1[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5, buf6, primals_8, primals_6, primals_4
class NetNew(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super(NetNew, self).__init__()
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
self.layer3 = torch.nn.Linear(n_hidden, n_hidden)
self.layer4 = torch.nn.Linear(n_hidden, n_output)
def forward(self, input_0):
primals_1 = self.layer1.weight
primals_2 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_6 = self.layer3.weight
primals_7 = self.layer3.bias
primals_8 = self.layer4.weight
primals_9 = self.layer4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| kimukook/variable_length_oscillating_pendulum | Net | false | 3,837 | [
"MIT"
] | 0 | 486aa95fe4b9cbaa6cbeb542209259484f48e191 | https://github.com/kimukook/variable_length_oscillating_pendulum/tree/486aa95fe4b9cbaa6cbeb542209259484f48e191 | import torch
class Model(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output):
super().__init__()
self.layer1 = torch.nn.Linear(n_input, n_hidden)
self.layer2 = torch.nn.Linear(n_hidden, n_hidden)
self.layer3 = torch.nn.Linear(n_hidden, n_hidden)
self.layer4 = torch.nn.Linear(n_hidden, n_output)
def forward(self, x):
        tanh = torch.nn.Tanh()
        h_1 = tanh(self.layer1(x))
        h_2 = tanh(self.layer2(h_1))
        h_3 = tanh(self.layer3(h_2))
out = self.layer4(h_3) ** 2
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/oe/coeqskvavjig2xfptqbr47z2ukzhyagx4dxozc4kzjwb2ykujlu4.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt, mul, where
# Graph fragment:
# %add_tensor_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_3), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_3, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_3, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_3, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
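# Editorial note (hedged): each fused LeakyReLU kernel stores both the boolean
# mask x > 0 (out_ptr0) and the activation (out_ptr1); the mask is presumably
# retained for the LeakyReLU backward instead of recomputing the comparison.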
# kernel path: runs/run_shard_7/inductor_cache/uo/cuorclbf4h3pyyjppsftkziufkt34vxjph7yux3ly7ujjfqkt7ad.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_1, mul_1, where_1
# Graph fragment:
# %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_5), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_2, 0.2), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_tensor_2, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mf/cmfwsgrlk36ejtu74rx7ipaja6rfirwmwjgqhdqter22spc6ajam.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_5 => gt_2, mul_2, where_2
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.2), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_tensor_1, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rh/crhvyy3w3uejbzndu7qftnyc25sndrfzlmb3i2bzpyadobz7z7bm.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
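    # in-place epilogue of the final fc4 matmul: add the scalar bias
    # (broadcast from a single element) and apply sigmoid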
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
assert_size_stride(primals_3, (1024, ), (1, ))
assert_size_stride(primals_4, (512, 1024), (1024, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (256, 512), (512, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (1, 256), (256, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 4096, grid=grid(4096), stream=stream0)
del buf0
del primals_3
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.leaky_relu, aten.native_dropout]
buf3 = torch.ops.aten.native_dropout.default(buf2, 0.3, True)
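        # native_dropout returns (output, bool mask); train=True applies p=0.3 dropout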
del buf2
buf4 = buf3[0]
buf5 = buf3[1]
del buf3
buf6 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1024, 512), (1, 1024), 0), out=buf6)
buf7 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf6, primals_5, buf7, buf8, 2048, grid=grid(2048), stream=stream0)
del buf6
del primals_5
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.leaky_relu, aten.native_dropout]
buf9 = torch.ops.aten.native_dropout.default(buf8, 0.3, True)
del buf8
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (512, 256), (1, 512), 0), out=buf12)
buf13 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf14 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf12, primals_7, buf13, buf14, 1024, grid=grid(1024), stream=stream0)
del buf12
del primals_7
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.leaky_relu, aten.native_dropout]
buf15 = torch.ops.aten.native_dropout.default(buf14, 0.3, True)
del buf14
buf16 = buf15[0]
buf17 = buf15[1]
del buf15
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (256, 1), (1, 256), 0), out=buf18)
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_3.run(buf19, primals_9, 4, grid=grid(4), stream=stream0)
del primals_9
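    # first element is the network output; the remaining tensors (dropout
    # masks, LeakyReLU masks, weights) appear to be saved for the backward pass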
return (buf19, primals_1, buf1, buf4, buf5, buf7, buf10, buf11, buf13, buf16, buf17, buf19, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
class Discriminator(nn.Module):
def __init__(self, img_shape, hidden_dim=1024):
super().__init__()
in_dim = int(np.prod(img_shape))
self.fc1 = nn.Linear(in_dim, hidden_dim)
self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2)
self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2)
self.fc4 = nn.Linear(self.fc3.out_features, 1)
def forward(self, img):
x = img.view(img.size(0), -1)
x = F.leaky_relu(self.fc1(x), 0.2)
x = F.dropout(x, 0.3)
x = F.leaky_relu(self.fc2(x), 0.2)
x = F.dropout(x, 0.3)
x = F.leaky_relu(self.fc3(x), 0.2)
x = F.dropout(x, 0.3)
return torch.sigmoid(self.fc4(x))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'img_shape': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
assert_size_stride(primals_3, (1024,), (1,))
assert_size_stride(primals_4, (512, 1024), (1024, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (256, 512), (512, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (1, 256), (256, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024
), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(4096)](buf0, primals_3, buf1,
buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_3
buf3 = torch.ops.aten.native_dropout.default(buf2, 0.3, True)
del buf2
buf4 = buf3[0]
buf5 = buf3[1]
del buf3
buf6 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1024, 512),
(1, 1024), 0), out=buf6)
buf7 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(2048)](buf6, primals_5, buf7,
buf8, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_5
buf9 = torch.ops.aten.native_dropout.default(buf8, 0.3, True)
del buf8
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (512, 256),
(1, 512), 0), out=buf12)
buf13 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf14 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
triton_poi_fused_leaky_relu_2[grid(1024)](buf12, primals_7, buf13,
buf14, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf12
del primals_7
buf15 = torch.ops.aten.native_dropout.default(buf14, 0.3, True)
del buf14
buf16 = buf15[0]
buf17 = buf15[1]
del buf15
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (256, 1), (1,
256), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_sigmoid_3[grid(4)](buf19, primals_9, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_9
return (buf19, primals_1, buf1, buf4, buf5, buf7, buf10, buf11, buf13,
buf16, buf17, buf19, primals_8, primals_6, primals_4)
class DiscriminatorNew(nn.Module):
def __init__(self, img_shape, hidden_dim=1024):
super().__init__()
in_dim = int(np.prod(img_shape))
self.fc1 = nn.Linear(in_dim, hidden_dim)
self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2)
self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2)
self.fc4 = nn.Linear(self.fc3.out_features, 1)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
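# A minimal smoke test for the compiled module (not part of the generated
# code); it assumes a CUDA device with Triton available and mirrors the
# shapes from get_inputs()/get_init_inputs() above.
if __name__ == "__main__":
    model = DiscriminatorNew(img_shape=4).cuda()
    probs = model(torch.rand(4, 4, device="cuda"))
    print(probs.shape)  # expected: torch.Size([4, 1])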
| jfrancis71/pytorch-lightning-bolts | Discriminator | false | 3,838 | [
"Apache-2.0"
] | 0 | 8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6 | https://github.com/jfrancis71/pytorch-lightning-bolts/tree/8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6 | import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, img_shape, hidden_dim=1024):
super().__init__()
in_dim = int(np.prod(img_shape))
self.fc1 = nn.Linear(in_dim, hidden_dim)
self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2)
self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2)
self.fc4 = nn.Linear(self.fc3.out_features, 1)
def forward(self, img):
x = img.view(img.size(0), -1)
x = F.leaky_relu(self.fc1(x), 0.2)
x = F.dropout(x, 0.3)
x = F.leaky_relu(self.fc2(x), 0.2)
x = F.dropout(x, 0.3)
x = F.leaky_relu(self.fc3(x), 0.2)
x = F.dropout(x, 0.3)
return torch.sigmoid(self.fc4(x))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
ResidualSineLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cv/ccvh5pcjc57oskvep5v2okylauufvnesitt2j6dbyvr6fh3vwuer.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
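    # weight_1 == 1 in this configuration, so the multiply by 1.0 makes this
    # kernel an identity copy into a fresh buffer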
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uc/cucjkoihef6w5fcrwkobehfnnpvpccnpkhd7kk7w56u3v35lluwv.py
# Topologically Sorted Source Nodes: [mul_1, sine_1], Original ATen: [aten.mul, aten.sin]
# Source node to ATen node mapping:
# mul_1 => mul_1
# sine_1 => sin
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 30), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul_1,), kwargs = {})
triton_poi_fused_mul_sin_1 = async_compile.triton('triton_poi_fused_mul_sin_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sin_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sin_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
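    # omega_0 == 30 is baked in as a constant: computes sin(30 * x) elementwise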
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ze/czecxcuebnxd7ejgfkxi6i3l4c57bhim2n7rvlfurhha5pn3fcte.py
# Topologically Sorted Source Nodes: [mul_2, sine_2, add, mul_3], Original ATen: [aten.mul, aten.sin, aten.add]
# Source node to ATen node mapping:
# add => add
# mul_2 => mul_2
# mul_3 => mul_3
# sine_2 => sin_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 30), kwargs = {})
# %sin_1 : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %sin_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1), kwargs = {})
triton_poi_fused_add_mul_sin_2 = async_compile.triton('triton_poi_fused_add_mul_sin_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sin_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sin_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
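    # fuses sin(30 * linear_2_out), the residual add with the input, and the
    # weight_2 (== 1 here) scaling into one elementwise pass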
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = 30.0
tmp3 = tmp1 * tmp2
tmp4 = tl_math.sin(tmp3)
tmp5 = tmp0 + tmp4
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, sine_1], Original ATen: [aten.mul, aten.sin]
triton_poi_fused_mul_sin_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, sine_2, add, mul_3], Original ATen: [aten.mul, aten.sin, aten.add]
triton_poi_fused_add_mul_sin_2.run(primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0)
del primals_1
return (buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
class ResidualSineLayer(nn.Module):
"""
From Lu & Berger 2021, Compressive Neural Representations of Volumetric Scalar Fields
https://github.com/matthewberger/neurcomp/blob/main/siren.py
"""
def __init__(self, features: 'int', bias=True, ave_first=False,
ave_second=False, omega_0=30):
super().__init__()
self.omega_0 = omega_0
self.features = features
self.linear_1 = nn.Linear(features, features, bias=bias)
self.linear_2 = nn.Linear(features, features, bias=bias)
self.weight_1 = 0.5 if ave_first else 1
self.weight_2 = 0.5 if ave_second else 1
self.init_weights()
def init_weights(self):
with torch.no_grad():
self.linear_1.weight.uniform_(-np.sqrt(6 / self.features) /
self.omega_0, np.sqrt(6 / self.features) / self.omega_0)
self.linear_2.weight.uniform_(-np.sqrt(6 / self.features) /
self.omega_0, np.sqrt(6 / self.features) / self.omega_0)
def forward(self, input):
sine_1 = torch.sin(self.omega_0 * self.linear_1(self.weight_1 * input))
sine_2 = torch.sin(self.omega_0 * self.linear_2(sine_1))
return self.weight_2 * (input + sine_2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sin_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_add_mul_sin_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = 30.0
tmp3 = tmp1 * tmp2
tmp4 = tl_math.sin(tmp3)
tmp5 = tmp0 + tmp4
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sin_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sin_2[grid(256)](primals_1, buf3, buf4,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, primals_4
class ResidualSineLayerNew(nn.Module):
"""
From Lu & Berger 2021, Compressive Neural Representations of Volumetric Scalar Fields
https://github.com/matthewberger/neurcomp/blob/main/siren.py
"""
def __init__(self, features: 'int', bias=True, ave_first=False,
ave_second=False, omega_0=30):
super().__init__()
self.omega_0 = omega_0
self.features = features
self.linear_1 = nn.Linear(features, features, bias=bias)
self.linear_2 = nn.Linear(features, features, bias=bias)
self.weight_1 = 0.5 if ave_first else 1
self.weight_2 = 0.5 if ave_second else 1
self.init_weights()
def init_weights(self):
with torch.no_grad():
self.linear_1.weight.uniform_(-np.sqrt(6 / self.features) /
self.omega_0, np.sqrt(6 / self.features) / self.omega_0)
self.linear_2.weight.uniform_(-np.sqrt(6 / self.features) /
self.omega_0, np.sqrt(6 / self.features) / self.omega_0)
def forward(self, input_0):
primals_2 = self.linear_1.weight
primals_3 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
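# A minimal smoke test (not part of the generated code); assumes a CUDA
# device with Triton available, matching get_inputs()/get_init_inputs().
if __name__ == "__main__":
    layer = ResidualSineLayerNew(features=4).cuda()
    out = layer(torch.rand(4, 4, 4, 4, device="cuda"))
    print(out.shape)  # expected: torch.Size([4, 4, 4, 4])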
| khoehlein/fV-SRN | ResidualSineLayer | false | 3,839 | [
"MIT"
] | 0 | 601f3e952b090df92e875c233c2c9ca646523948 | https://github.com/khoehlein/fV-SRN/tree/601f3e952b090df92e875c233c2c9ca646523948 | import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
From Lu & Berger 2021, Compressive Neural Representations of Volumetric Scalar Fields
https://github.com/matthewberger/neurcomp/blob/main/siren.py
"""
def __init__(self, features: 'int', bias=True, ave_first=False,
ave_second=False, omega_0=30):
super().__init__()
self.omega_0 = omega_0
self.features = features
self.linear_1 = nn.Linear(features, features, bias=bias)
self.linear_2 = nn.Linear(features, features, bias=bias)
self.weight_1 = 0.5 if ave_first else 1
self.weight_2 = 0.5 if ave_second else 1
self.init_weights()
def init_weights(self):
with torch.no_grad():
self.linear_1.weight.uniform_(-np.sqrt(6 / self.features) /
self.omega_0, np.sqrt(6 / self.features) / self.omega_0)
self.linear_2.weight.uniform_(-np.sqrt(6 / self.features) /
self.omega_0, np.sqrt(6 / self.features) / self.omega_0)
def forward(self, input):
sine_1 = torch.sin(self.omega_0 * self.linear_1(self.weight_1 * input))
sine_2 = torch.sin(self.omega_0 * self.linear_2(sine_1))
return self.weight_2 * (input + sine_2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
BertPredictionHeadTransform | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/k6/ck6o2ucwdqtvjyw7bruyzgade2k6iruvl53t2wmqy2xkgypurpgf.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add => add
# erf => erf
# hidden_states_1 => mul_1
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sub => sub
# truediv => div
# u => mean
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
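    # fully unrolled over the last dim (hidden_size == 4): applies GELU to each
    # element, then computes the per-row mean (tmp31) and biased variance (tmp43)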
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + (x0), tmp31, xmask)
tl.store(out_ptr1 + (x0), tmp43, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pg/cpgs2sqnuixouquussvupjwsl3m3pfglz6posqku5lqt2uwjaend.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# erf => erf
# hidden_states_1 => mul_1
# hidden_states_2 => add_2
# mul => mul
# mul_2 => mul_2
# sqrt => sqrt
# sub => sub
# truediv => div
# x => div_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_5), kwargs = {})
triton_poi_fused_add_div_erf_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused_add_div_erf_mul_sqrt_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
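    # recomputes GELU, normalizes with the precomputed mean/variance
    # (eps = 1e-12), then applies the LayerNorm affine: gamma * x_hat + beta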
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865475
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp11 = tmp9 - tmp10
tmp13 = 1e-12
tmp14 = tmp12 + tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = tmp11 / tmp15
tmp17 = tmp0 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt]
triton_poi_fused_add_div_erf_mul_sqrt_sub_1.run(primals_4, buf0, buf1, buf2, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
del primals_5
return (buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
def gelu(x):
"""Gaussian Error Linear Unitという活性化関数です。
LeLUが0でカクっと不連続なので、そこを連続になるように滑らかにした形のLeLUです。
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertPredictionHeadTransform(nn.Module):
"""MaskedWordPredictionsにて、BERTからの特徴量を変換するモジュール(入出力のサイズは同じ)"""
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
"""hidden_statesはsequence_output:[minibatch, seq_len, hidden_size]"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + x0, tmp31, xmask)
tl.store(out_ptr1 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865475
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp11 = tmp9 - tmp10
tmp13 = 1e-12
tmp14 = tmp12 + tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = tmp11 / tmp15
tmp17 = tmp0 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4,
buf0, buf1, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del buf2
del primals_5
return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
def gelu(x):
"""Gaussian Error Linear Unitという活性化関数です。
LeLUが0でカクっと不連続なので、そこを連続になるように滑らかにした形のLeLUです。
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertPredictionHeadTransformNew(nn.Module):
"""MaskedWordPredictionsにて、BERTからの特徴量を変換するモジュール(入出力のサイズは同じ)"""
def __init__(self, config):
super(BertPredictionHeadTransformNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, input_0):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_4 = self.LayerNorm.gamma
primals_5 = self.LayerNorm.beta
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
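# A minimal smoke test (not part of the generated code); assumes a CUDA
# device with Triton available. SimpleNamespace stands in for the BERT
# config object expected by __init__.
if __name__ == "__main__":
    from types import SimpleNamespace
    head = BertPredictionHeadTransformNew(SimpleNamespace(hidden_size=4)).cuda()
    out = head(torch.rand(4, 4, 4, 4, device="cuda"))
    print(out.shape)  # expected: torch.Size([4, 4, 4, 4])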
| kimihitosugiyama/text_analysis | BertPredictionHeadTransform | false | 3,840 | [
"Apache-2.0"
] | 0 | 8f51022957928c31e52af1e0fd407daca3addb40 | https://github.com/kimihitosugiyama/text_analysis/tree/8f51022957928c31e52af1e0fd407daca3addb40 | from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
def gelu(x):
"""Gaussian Error Linear Unitという活性化関数です。
LeLUが0でカクっと不連続なので、そこを連続になるように滑らかにした形のLeLUです。
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super().__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class Model(nn.Module):
"""MaskedWordPredictionsにて、BERTからの特徴量を変換するモジュール(入出力のサイズは同じ)"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
"""hidden_statesはsequence_output:[minibatch, seq_len, hidden_size]"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BertPreTrainingHeads | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/k6/ck6o2ucwdqtvjyw7bruyzgade2k6iruvl53t2wmqy2xkgypurpgf.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add => add
# erf => erf
# hidden_states_1 => mul_1
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sub => sub
# truediv => div
# u => mean
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + (x0), tmp31, xmask)
tl.store(out_ptr1 + (x0), tmp43, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pg/cpgs2sqnuixouquussvupjwsl3m3pfglz6posqku5lqt2uwjaend.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# erf => erf
# hidden_states_1 => mul_1
# hidden_states_2 => add_2
# mul => mul
# mul_2 => mul_2
# sqrt => sqrt
# sub => sub
# truediv => div
# x => div_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_5), kwargs = {})
triton_poi_fused_add_div_erf_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused_add_div_erf_mul_sqrt_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865475
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp11 = tmp9 - tmp10
tmp13 = 1e-12
tmp14 = tmp12 + tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = tmp11 / tmp15
tmp17 = tmp0 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/54/c54scjtvk2o35gotra5iaxkdvh5gkumub7zcwgocxnyspbrgsbol.py
# Topologically Sorted Source Nodes: [hidden_states_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# hidden_states_3 => add_3
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_7), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (2, 4), (4, 1))
assert_size_stride(primals_9, (2, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt]
triton_poi_fused_add_div_erf_mul_sqrt_sub_1.run(primals_4, buf0, buf1, buf2, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [hidden_states_3], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [seq_relationship_score], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(primals_10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_8
del primals_9
return (buf5, reinterpret_tensor(buf6, (4, 4, 4, 2), (32, 8, 2, 1), 0), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (64, 4), (4, 1), 0), primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
def gelu(x):
"""Gaussian Error Linear Unitという活性化関数です。
LeLUが0でカクっと不連続なので、そこを連続になるように滑らかにした形のLeLUです。
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertPredictionHeadTransform(nn.Module):
"""MaskedWordPredictionsにて、BERTからの特徴量を変換するモジュール(入出力のサイズは同じ)"""
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
"""hidden_statesはsequence_output:[minibatch, seq_len, hidden_size]"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class MaskedWordPredictions(nn.Module):
def __init__(self, config):
"""事前学習課題:Masked Language Model用のモジュール
元の[2]の実装では、BertLMPredictionHeadという名前です。
"""
super(MaskedWordPredictions, self).__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(in_features=config.hidden_size,
out_features=config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
"""
        hidden_states: BERT output, shape [batch_size, seq_len, hidden_size]
"""
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertPreTrainingHeads(nn.Module):
"""BERTの事前学習課題を行うアダプターモジュール"""
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = MaskedWordPredictions(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
"""入力情報
sequence_output:[batch_size, seq_len, hidden_size]
pooled_output:[batch_size, hidden_size]
"""
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, vocab_size=4),
'bert_model_embedding_weights': 4}]
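# Usage sketch (illustrative; _mock_config comes from the test harness):
#   heads = BertPreTrainingHeads(_mock_config(hidden_size=4, vocab_size=4), 4)
#   scores, nsp = heads(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
#   # scores: [..., vocab_size], nsp: [..., 2]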
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
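# First kernel: fuses GELU with BertLayerNorm's row statistics. For each of
# the 64 rows (hidden_size = 4) it applies x * 0.5 * (1 + erf(x / sqrt(2)))
# elementwise, then stores the row mean in out_ptr0 and the biased variance
# in out_ptr1.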
@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + x0, tmp31, xmask)
tl.store(out_ptr1 + x0, tmp43, xmask)
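# Second kernel: recomputes GELU on the fly and finishes LayerNorm,
# (x - mean) / sqrt(var + 1e-12), scaled by the per-feature vector in in_ptr0
# and shifted by the one in in_ptr4.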
@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865475
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp11 = tmp9 - tmp10
tmp13 = 1e-12
tmp14 = tmp12 + tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = tmp11 / tmp15
tmp17 = tmp0 * tmp16
tmp19 = tmp17 + tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
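# Third kernel: a simple in-place bias add; a length-4 vector (in_ptr0) is
# broadcast over the last dimension of the mm output.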
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
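# Inductor-generated entry point: dense addmm -> fused GELU + LayerNorm stats
# -> fused normalize/affine -> decoder mm + bias add -> seq-relationship
# addmm. Returns the prediction scores, the seq-relationship logits, and the
# tensors saved for the backward pass.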
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (2, 4), (4, 1))
assert_size_stride(primals_9, (2,), (1,))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4,
buf0, buf1, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del buf2
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_add_2[grid(256)](buf5, primals_7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(primals_10, (64,
4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 2), (1, 4), 0
), alpha=1, beta=1, out=buf6)
del primals_8
del primals_9
return buf5, reinterpret_tensor(buf6, (4, 4, 4, 2), (32, 8, 2, 1), 0
), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_10, (64, 4), (4, 1), 0), primals_6
def gelu(x):
"""Gaussian Error Linear Unitという活性化関数です。
LeLUが0でカクっと不連続なので、そこを連続になるように滑らかにした形のLeLUです。
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertPredictionHeadTransform(nn.Module):
"""MaskedWordPredictionsにて、BERTからの特徴量を変換するモジュール(入出力のサイズは同じ)"""
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
"""hidden_statesはsequence_output:[minibatch, seq_len, hidden_size]"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class MaskedWordPredictions(nn.Module):
def __init__(self, config):
"""事前学習課題:Masked Language Model用のモジュール
元の[2]の実装では、BertLMPredictionHeadという名前です。
"""
super(MaskedWordPredictions, self).__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(in_features=config.hidden_size,
out_features=config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
"""
        hidden_states: BERT output, shape [batch_size, seq_len, hidden_size]
"""
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertPreTrainingHeadsNew(nn.Module):
"""BERTの事前学習課題を行うアダプターモジュール"""
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeadsNew, self).__init__()
self.predictions = MaskedWordPredictions(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, input_0, input_1):
primals_2 = self.predictions.bias
primals_1 = self.predictions.transform.dense.weight
primals_4 = self.predictions.transform.dense.bias
primals_5 = self.predictions.transform.LayerNorm.gamma
primals_7 = self.predictions.transform.LayerNorm.beta
primals_6 = self.predictions.decoder.weight
primals_8 = self.seq_relationship.weight
primals_9 = self.seq_relationship.bias
primals_3 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
| kimihitosugiyama/text_analysis | BertPreTrainingHeads | false | 3,841 | [
"Apache-2.0"
] | 0 | 8f51022957928c31e52af1e0fd407daca3addb40 | https://github.com/kimihitosugiyama/text_analysis/tree/8f51022957928c31e52af1e0fd407daca3addb40 | from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
def gelu(x):
"""Gaussian Error Linear Unitという活性化関数です。
LeLUが0でカクっと不連続なので、そこを連続になるように滑らかにした形のLeLUです。
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""LayerNormalization層です。
学習済みモデルをそのままロードするため、学習済みモデルの変数名に変えています。
オリジナルのGitHubの実装から変数名を変えています。
weight→gamma、bias→beta
"""
super().__init__()
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertPredictionHeadTransform(nn.Module):
"""MaskedWordPredictionsにて、BERTからの特徴量を変換するモジュール(入出力のサイズは同じ)"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
"""hidden_statesはsequence_output:[minibatch, seq_len, hidden_size]"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class MaskedWordPredictions(nn.Module):
def __init__(self, config):
"""事前学習課題:Masked Language Model用のモジュール
元の[2]の実装では、BertLMPredictionHeadという名前です。
"""
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(in_features=config.hidden_size,
out_features=config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
"""
        hidden_states: BERT output, shape [batch_size, seq_len, hidden_size]
"""
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class Model(nn.Module):
"""BERTの事前学習課題を行うアダプターモジュール"""
def __init__(self, config, bert_model_embedding_weights):
super().__init__()
self.predictions = MaskedWordPredictions(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
"""入力情報
sequence_output:[batch_size, seq_len, hidden_size]
pooled_output:[batch_size, hidden_size]
"""
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, vocab_size=4),
'bert_model_embedding_weights': 4}]
|
MSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/m2/cm2ulkcufgr42dsw4heohna23zxe43rg4kpxfzrdonmmhuwpfezt.py
# Topologically Sorted Source Nodes: [res, mul, mul_1, image_loss, M, mul_2], Original ATen: [aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# M => sum_1
# image_loss => sum_2
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# res => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sub), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sub), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1, 2]), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1, 2]), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
triton_per_fused_mul_sub_sum_0 = async_compile.triton('triton_per_fused_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tmp0 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp15, xmask)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [res, mul, mul_1, image_loss, M, mul_2], Original ATen: [aten.sub, aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, buf0, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def reduction_batch_based(image_loss, M):
divisor = torch.sum(M)
if divisor == 0:
return 0
else:
return torch.sum(image_loss) / divisor
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
M = torch.sum(mask, (1, 2))
res = prediction - target
image_loss = torch.sum(mask * res * res, (1, 2))
return reduction(image_loss, 2 * M)
def reduction_image_based(image_loss, M):
valid = M.nonzero()
image_loss[valid] = image_loss[valid] / M[valid]
return torch.mean(image_loss)
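# Worked example of the masked-MSE formula (illustrative values):
#   pred, tgt = torch.ones(1, 2, 2), torch.zeros(1, 2, 2)
#   mask = torch.ones(1, 2, 2)
#   mse_loss(pred, tgt, mask)  # sum(mask*res^2)=4, 2*M=8 -> tensor(0.5000)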
class MSELoss(nn.Module):
def __init__(self, reduction='batch-based'):
super().__init__()
if reduction == 'batch-based':
self.__reduction = reduction_batch_based
else:
self.__reduction = reduction_image_based
def forward(self, prediction, target, mask):
return mse_loss(prediction, target, mask, reduction=self.__reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
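# Persistent reduction: for each (batch, last-dim) pair the kernel sums
# mask * (prediction - target)^2 over dims (1, 2) into out_ptr0 (image_loss)
# and writes 2 * sum(mask) into in_out_ptr0 (the 2*M divisor), fusing the
# sub/mul/sum chain of mse_loss into a single pass.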
@triton.jit
def triton_per_fused_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tmp0 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr0 + x3, tmp9, xmask)
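# Wrapper: one fused kernel launch producing buf0 = image_loss and
# buf2 = 2*M; the final scalar reduction is not part of this compiled graph.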
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_mul_sub_sum_0[grid(16)](buf2, arg0_1, arg1_1,
arg2_1, buf0, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0, buf2
def reduction_batch_based(image_loss, M):
divisor = torch.sum(M)
if divisor == 0:
return 0
else:
return torch.sum(image_loss) / divisor
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
M = torch.sum(mask, (1, 2))
res = prediction - target
image_loss = torch.sum(mask * res * res, (1, 2))
return reduction(image_loss, 2 * M)
def reduction_image_based(image_loss, M):
valid = M.nonzero()
image_loss[valid] = image_loss[valid] / M[valid]
return torch.mean(image_loss)
class MSELossNew(nn.Module):
def __init__(self, reduction='batch-based'):
super().__init__()
if reduction == 'batch-based':
self.__reduction = reduction_batch_based
else:
self.__reduction = reduction_image_based
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| kopetri/MIDAS_pytorch | MSELoss | false | 3,842 | [
"MIT"
] | 0 | 9e933bd241ee18b487dcd2b65c28a55d8a923292 | https://github.com/kopetri/MIDAS_pytorch/tree/9e933bd241ee18b487dcd2b65c28a55d8a923292 | import torch
import torch.nn as nn
def reduction_batch_based(image_loss, M):
divisor = torch.sum(M)
if divisor == 0:
return 0
else:
return torch.sum(image_loss) / divisor
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
M = torch.sum(mask, (1, 2))
res = prediction - target
image_loss = torch.sum(mask * res * res, (1, 2))
return reduction(image_loss, 2 * M)
def reduction_image_based(image_loss, M):
valid = M.nonzero()
image_loss[valid] = image_loss[valid] / M[valid]
return torch.mean(image_loss)
class Model(nn.Module):
def __init__(self, reduction='batch-based'):
super().__init__()
if reduction == 'batch-based':
self.__reduction = reduction_batch_based
else:
self.__reduction = reduction_image_based
def forward(self, prediction, target, mask):
return mse_loss(prediction, target, mask, reduction=self.__reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
QNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [stateact], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# stateact => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [stateact], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf8, 256, grid=grid(256), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf4, primals_6, buf7, 256, grid=grid(256), stream=stream0)
del primals_6
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_8
return (reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_7, buf7, primals_5, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
def __init__(self, statedim, actiondim, hiddendim, init_w=0.0003):
super().__init__()
self.linear1 = nn.Linear(statedim + actiondim, hiddendim)
self.linear2 = nn.Linear(hiddendim, hiddendim)
self.linear3 = nn.Linear(hiddendim, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
stateact = torch.cat([state, action], dim=-1)
x = F.relu(self.linear1(stateact))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'statedim': 4, 'actiondim': 4, 'hiddendim': 4}]
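# Usage sketch (illustrative): the critic maps (state, action) -> Q-value.
#   q = QNetwork(statedim=4, actiondim=4, hiddendim=4)
#   q(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)).shape  # (4, 4, 4, 1)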
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
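# Fused torch.cat along the last dimension: each output element selects from
# the state tensor (x0 < 4) or the action tensor (x0 >= 4) via tl.where.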
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
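# Fused bias-add + ReLU; it also stores the (activation <= 0) mask that the
# autograd threshold_backward pass reuses for the ReLU gradient.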
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
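# Generated wrapper: concatenate state/action, then alternate mm with the
# fused bias+ReLU kernel for the two hidden layers, and finish with addmm
# for the scalar Q-value head.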
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2,
primals_4, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf4,
primals_6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf4, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf6)
del primals_8
return reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(
buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_7, buf7, primals_5, buf8
class QNetworkNew(nn.Module):
def __init__(self, statedim, actiondim, hiddendim, init_w=0.0003):
super().__init__()
self.linear1 = nn.Linear(statedim + actiondim, hiddendim)
self.linear2 = nn.Linear(hiddendim, hiddendim)
self.linear3 = nn.Linear(hiddendim, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0, input_1):
primals_3 = self.linear1.weight
primals_4 = self.linear1.bias
primals_5 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.linear3.weight
primals_8 = self.linear3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| kiranprasad/multiagent-particle-envs | QNetwork | false | 3,843 | [
"MIT"
] | 0 | e28e3ff6606e80f11ee16bb2c42f21c442ad29a8 | https://github.com/kiranprasad/multiagent-particle-envs/tree/e28e3ff6606e80f11ee16bb2c42f21c442ad29a8 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, statedim, actiondim, hiddendim, init_w=0.0003):
super().__init__()
self.linear1 = nn.Linear(statedim + actiondim, hiddendim)
self.linear2 = nn.Linear(hiddendim, hiddendim)
self.linear3 = nn.Linear(hiddendim, 1)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state, action):
stateact = torch.cat([state, action], dim=-1)
x = F.relu(self.linear1(stateact))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
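# Usage sketch (editor's annotation, consistent with the helpers above):
#   net = Model(statedim=4, actiondim=4, hiddendim=4)
#   q = net(*get_inputs())   # state/action concatenated -> q: (4, 4, 4, 1)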
|
FFN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4z/c4zgnn5xgggv23744fzrg6lxhoekqmw3cnbehu67v2xpylosgldq.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2m/c2mt7vvxypcyg4roj4r3bns7fqblbouce2ybektelf7rsc62boym.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hz/chzopt4dha3iysd3mhk7whnzhaph7qbkzn6mohxcchnv63q7tbck.py
# Topologically Sorted Source Nodes: [x_2, x_4], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add
# x_4 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_1, %primals_1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xi/cxinz4rdxhmh5h2quvcvdgopw42iolaz3ntyn2sslxkspcolpf5a.py
# Topologically Sorted Source Nodes: [x_2, x_4], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add
# x_4 => add_1, add_2, mul, mul_1, rsqrt, sub
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_1, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (16, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 16, 3), (48, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (1, 16, 4), (64, 4, 1))
buf1 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf7, 64, grid=grid(64), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 16, 4), (0, 4, 1), 0), primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 4), (16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_4], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_2.run(buf3, primals_1, buf4, buf5, 4, grid=grid(4), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_4], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_3.run(buf3, primals_1, buf4, buf5, primals_6, primals_7, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del buf5
del primals_7
return (buf6, primals_1, primals_2, primals_4, primals_6, reinterpret_tensor(buf1, (1, 16, 4), (64, 4, 1), 0), buf3, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16, 3), (48, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
        :param w_init: str. nonlinearity name used to pick the gain for Xavier weight initialization.
"""
super(Conv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFN(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
        :param num_hidden: dimension of the hidden representation
"""
super(FFN, self).__init__()
self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=3, padding=
1, w_init='relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=3, padding=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_):
x = input_
x = self.w_2(torch.relu(self.w_1(x)))
x = x + input_
x = self.dropout(x)
x = self.layer_norm(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_hidden': 4}]
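# Shape note (editor's annotation): with num_hidden=4 the (4, 4) input is read
# by Conv1d as an unbatched (channels, length) pair (PyTorch >= 1.12), w_1
# expands 4 -> 16 channels and w_2 contracts 16 -> 4 with length preserved
# (kernel 3, padding 1), so FFN(x) matches the input shape — which the
# residual add requires.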
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
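# Editor's note: the kernel above fuses the residual add with LayerNorm's
# statistics pass — the 4-element feature row is a compile-time constant, so
# the reduction is fully unrolled and each row's mean (out_ptr0) and biased
# variance (out_ptr1) are emitted for the normalization kernel below.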
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
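# Editor's note: this second kernel applies the LayerNorm epilogue, computing
# ((x + residual) - mean) * rsqrt(var + 1e-05) * weight + bias per element
# from the statistics produced above.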
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (16, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 3), (48, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 16, 4), (64, 4, 1))
buf1 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
primals_3, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 16,
4), (0, 4, 1), 0), primals_4, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (1, 4, 4), (16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(16)](buf3, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(4)](buf3, primals_1,
buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(16)](buf3, primals_1,
buf4, buf5, primals_6, primals_7, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
del buf5
del primals_7
return (buf6, primals_1, primals_2, primals_4, primals_6,
reinterpret_tensor(buf1, (1, 16, 4), (64, 4, 1), 0), buf3, buf7)
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
        :param w_init: str. nonlinearity name used to pick the gain for Xavier weight initialization.
"""
super(Conv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFNNew(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
        :param num_hidden: dimension of the hidden representation
"""
super(FFNNew, self).__init__()
self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=3, padding=
1, w_init='relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=3, padding=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_0):
primals_2 = self.w_1.conv.weight
primals_3 = self.w_1.conv.bias
primals_4 = self.w_2.conv.weight
primals_5 = self.w_2.conv.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| kidconan/fast_speech_trans | FFN | false | 3,844 | [
"MIT"
] | 0 | 4d1d8fe0a871e37165e2a6333a11751ce2a017c0 | https://github.com/kidconan/fast_speech_trans/tree/4d1d8fe0a871e37165e2a6333a11751ce2a017c0 | import torch
import torch.nn as nn
import torch.utils.data
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
        :param w_init: str. nonlinearity name used to pick the gain for Xavier weight initialization.
"""
super().__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class Model(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
        :param num_hidden: dimension of the hidden representation
"""
super().__init__()
self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=3, padding=
1, w_init='relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=3, padding=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_):
x = input_
x = self.w_2(torch.relu(self.w_1(x)))
x = x + input_
x = self.dropout(x)
x = self.layer_norm(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lh/clhtaboxxs526aw4bqcb7s6xoig5vzwco55tfg6waaga3ao3elgd.py
# Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm]
# Source node to ATen node mapping:
# euclidean_distance => add, pow_1, pow_2, sub, sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub, 1e-06), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused_add_norm_sub_0 = async_compile.triton('triton_poi_fused_add_norm_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + (x0), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kr/ckrodhdb4mmjop4vjjr3bzlxptlbnzhgra2njohocdzvced2jwno.py
# Topologically Sorted Source Nodes: [sub, pow_1, mul, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add_1
# clamp => clamp_min
# loss_contrastive => mean
# mul => mul
# mul_1 => mul_1
# pow_1 => pow_3
# pow_2 => pow_4
# sub => sub_1
# sub_1 => sub_2
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %pow_3), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %pow_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %pow_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {})
triton_per_fused_add_clamp_mean_mul_pow_rsub_1 = async_compile.triton('triton_per_fused_add_clamp_mean_mul_pow_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_mean_mul_pow_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp3 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 2.0
tmp7 = tmp6 - tmp3
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp5 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm]
stream0 = get_raw_stream(0)
triton_poi_fused_add_norm_sub_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, mul, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.mean]
triton_per_fused_add_clamp_mean_mul_pow_rsub_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg2_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
class ContrastiveLoss(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True
)
loss_contrastive = torch.mean((1 - label) * torch.pow(
euclidean_distance, 2) + label * torch.pow(torch.clamp(self.
margin - euclidean_distance, min=0.0), 2))
return loss_contrastive
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
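# Worked form (editor's annotation): with d = ||o1 - o2 + 1e-6|| per pair and
# margin m = 2.0, the loss is mean((1 - y) * d**2 + y * clamp(m - d, 0)**2):
# similar pairs (y = 0) are pulled together, dissimilar pairs (y = 1) are
# pushed apart until d >= m, beyond which they contribute nothing.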
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + x0, tmp24, xmask)
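# Editor's note: this matches F.pairwise_distance's semantics — the 1e-06 eps
# is added to each coordinate difference *before* squaring, then the four
# squared terms (the last dim is a compile-time constant) are summed and
# square-rooted per row.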
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 2.0
tmp7 = tmp6 - tmp3
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp5 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2.0):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
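# Caveat (editor's annotation): the fused kernel bakes the default margin in
# as the literal 2.0 (tmp6), so this compiled module silently ignores a
# non-default `margin` argument — the export must be rerun if margin changes.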
| kornellewy/face_one_shot_learing | ContrastiveLoss | false | 3,845 | [
"MIT"
] | 0 | 4cd8c8b1807717f921853043858a6f7ad5259917 | https://github.com/kornellewy/face_one_shot_learing/tree/4cd8c8b1807717f921853043858a6f7ad5259917 | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2.0):
super().__init__()
self.margin = margin
def forward(self, output1, output2, label):
euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True
)
loss_contrastive = torch.mean((1 - label) * torch.pow(
euclidean_distance, 2) + label * torch.pow(torch.clamp(self.
margin - euclidean_distance, min=0.0), 2))
return loss_contrastive
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
EmbedNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/k3/ck32qkbu76goin6gngorb46frxtcgido7u4gqqjikn6bs3l76qke.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = (yindex // 1024)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None)
tl.store(out_ptr0 + (y0 + (1024*x2) + (4194304*y1)), tmp0, None)
''', device_str='cuda')
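# Editor's note: triton_poi_fused_0 is a pure layout shuffle — it copies the
# contiguous NCHW activation into a channels-last buffer so the 1x1 and 3x3
# convolutions dispatched below run on their preferred memory format.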
# kernel path: runs/run_shard_7/inductor_cache/oc/cochsno6wpkwamgsqz5legelnxxchuje5twfzhozvusus3e5bzmo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rw/crwjcvc7uqnpq2ugrojkfmg5yocmtx2f3xkklxvgpq4rds6erx42.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mg/cmgtc4lrnj76uhtbryswckadevfjmrjvgicmfll2snhhbnsejrdo.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 2048
y1 = (yindex // 2048)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (2048*x2) + (8388608*y1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_2, (512, ), (1, ))
assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_7, (2048, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 4096, 4096, grid=grid(4096, 4096), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf1, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf3, primals_2, 8388608, grid=grid(8388608), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 8388608, grid=grid(8388608), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 2048, 64, 64), (8388608, 1, 131072, 2048))
buf7 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf6, primals_7, buf7, 8192, 4096, grid=grid(8192, 4096), stream=stream0)
del buf6
del primals_7
return (buf7, primals_1, buf0, buf1, primals_6, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((512, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
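# --- Added sketch (not Inductor output): what triton_poi_fused_0 above
# achieves. It copies the NCHW input into a channels-last layout so the
# 1x1 convolution reads the channel dimension contiguously; the strides
# below are exactly those of buf0 in call().
import torch
_x = torch.randn(4, 1024, 64, 64)
_y = _x.to(memory_format=torch.channels_last)
assert _y.stride() == (4194304, 1, 65536, 1024)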
| from _paritybench_helpers import _mock_config
import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class EmbedNet(nn.Module):
def __init__(self, cfg):
super(EmbedNet, self).__init__()
self.embed_conv1 = nn.Conv2d(1024, 512, kernel_size=1, stride=1)
self.embed_conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1,
padding=1)
self.embed_conv3 = nn.Conv2d(512, 2048, kernel_size=1, stride=1)
for l in [self.embed_conv1, self.embed_conv2, self.embed_conv3]:
nn.init.kaiming_uniform_(l.weight, a=1)
nn.init.zeros_(l.bias)
def forward(self, x):
x = F.relu(self.embed_conv1(x))
x = F.relu(self.embed_conv2(x))
x = self.embed_conv3(x)
return x
def get_inputs():
return [torch.rand([4, 1024, 64, 64])]
def get_init_inputs():
return [[], {'cfg': _mock_config()}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None)
tl.store(out_ptr0 + (y0 + 1024 * x2 + 4194304 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 2048
y1 = yindex // 2048
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2048 * x2 + 8388608 * y1), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_7, (2048,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536,
1024), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(4096, 4096)](primals_3, buf0, 4096, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_1[grid(262144, 9)](primals_4, buf1, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_2[grid(8388608)](buf3, primals_2,
8388608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf4 = extern_kernels.convolution(buf3, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(8388608)](buf5, primals_5,
8388608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 2048, 64, 64), (8388608, 1, 131072, 2048))
buf7 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_3[grid(8192, 4096)](buf6, primals_7,
buf7, 8192, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1)
del buf6
del primals_7
return buf7, primals_1, buf0, buf1, primals_6, buf3, buf5
class EmbedNetNew(nn.Module):
def __init__(self, cfg):
super(EmbedNetNew, self).__init__()
self.embed_conv1 = nn.Conv2d(1024, 512, kernel_size=1, stride=1)
self.embed_conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1,
padding=1)
self.embed_conv3 = nn.Conv2d(512, 2048, kernel_size=1, stride=1)
for l in [self.embed_conv1, self.embed_conv2, self.embed_conv3]:
nn.init.kaiming_uniform_(l.weight, a=1)
nn.init.zeros_(l.bias)
def forward(self, input_0):
primals_1 = self.embed_conv1.weight
primals_2 = self.embed_conv1.bias
primals_4 = self.embed_conv2.weight
primals_5 = self.embed_conv2.bias
primals_6 = self.embed_conv3.weight
primals_7 = self.embed_conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
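# Added smoke test (a sketch, not part of the generated module): `cfg` is
# never read by __init__, so None suffices here; call() pins everything to
# cuda:0, hence the availability guard.
if torch.cuda.is_available():
    _net = EmbedNetNew(cfg=None).cuda()
    _out = _net(torch.rand(4, 1024, 64, 64, device='cuda'))
    assert _out.shape == (4, 2048, 64, 64)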
| hwfan/mega.pytorch | EmbedNet | false | 3,846 | [
"BSD-2-Clause"
] | 0 | a07b2267daad73c9482233cfe754d59b8ae2f688 | https://github.com/hwfan/mega.pytorch/tree/a07b2267daad73c9482233cfe754d59b8ae2f688 | from _paritybench_helpers import _mock_config
import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, cfg):
super().__init__()
self.embed_conv1 = nn.Conv2d(1024, 512, kernel_size=1, stride=1)
self.embed_conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1,
padding=1)
self.embed_conv3 = nn.Conv2d(512, 2048, kernel_size=1, stride=1)
for l in [self.embed_conv1, self.embed_conv2, self.embed_conv3]:
nn.init.kaiming_uniform_(l.weight, a=1)
nn.init.zeros_(l.bias)
def forward(self, x):
x = F.relu(self.embed_conv1(x))
x = F.relu(self.embed_conv2(x))
x = self.embed_conv3(x)
return x
def get_inputs():
return [torch.rand([4, 1024, 64, 64])]
def get_init_inputs():
return []
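# Shape walk-through (added sketch): 1024 -> 512 via 1x1, 512 -> 512 via
# 3x3 with padding 1 (spatial size preserved), 512 -> 2048 via 1x1, so
# only the channel count changes. `cfg` is unused, so None is enough here.
_m = Model(cfg=None)
assert _m(torch.rand(2, 1024, 7, 7)).shape == (2, 2048, 7, 7)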
|
CAM_Use | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/c5/cc5yrdhnfgzhobre36pxf5twcq34edx7g52e75nchrp6wm5m26p7.py
# Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# out_2 => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %view_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_3, buf0, primals_1, buf1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_3
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
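# Plain-PyTorch reference (added sketch) for the fused kernel above:
# buf1 = gamma * bmm(attention, x.view(B, C, -1)).view_as(x) + x.
# With gamma initialized to zero the residual passes through unchanged.
import torch
_x = torch.rand(4, 4, 4, 4)
_attn = torch.rand(4, 4, 4)
_gamma = torch.zeros(1)
_out = torch.bmm(_attn, _x.view(4, 4, -1)).view_as(_x)
assert torch.equal(_gamma * _out + _x, _x)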
| import torch
import torch.nn as nn
import torch.utils.data
class CAM_Use(nn.Module):
""" Channel attention module"""
def __init__(self, in_dim):
super(CAM_Use, self).__init__()
        self.channel_in = in_dim
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x, attention):
"""
inputs :
                x : input feature maps (B X C X H X W)
attention: B X C X C
returns :
out : attention value + input feature
"""
m_batchsize, C, height, width = x.size()
proj_value = x.contiguous().view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma * out + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4}]
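# Added property check: gamma starts at zero, so CAM_Use is the identity
# at initialization and the attended features are blended in only as
# gamma is learned.
_m = CAM_Use(in_dim=4)
_x, _a = get_inputs()
assert torch.equal(_m(_x, _a), _x)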
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(primals_2, reinterpret_tensor(primals_1, (4, 4,
16), (64, 16, 1), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_3, buf0, primals_1,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf1, buf0
class CAM_UseNew(nn.Module):
""" Channel attention module"""
def __init__(self, in_dim):
super(CAM_UseNew, self).__init__()
        self.channel_in = in_dim
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, input_0, input_1):
primals_3 = self.gamma
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| jbcnrlz/san | CAM_Use | false | 3,847 | [
"MIT"
] | 0 | 1eab20f83d3c7dba5607e22d1c70768905b62b12 | https://github.com/jbcnrlz/san/tree/1eab20f83d3c7dba5607e22d1c70768905b62b12 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
""" Channel attention module"""
def __init__(self, in_dim):
super().__init__()
        self.channel_in = in_dim
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x, attention):
"""
inputs :
                x : input feature maps (B X C X H X W)
attention: B X C X C
returns :
out : attention value + input feature
"""
m_batchsize, C, height, width = x.size()
proj_value = x.contiguous().view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma * out + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
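# End-to-end sketch (added): build a channel-attention map with the same
# max-subtraction softmax used by the companion CAM_Calculate module,
# then apply it through Model above.
_x = torch.rand(4, 4, 4, 4)
_q = _x.view(4, 4, -1)
_energy = torch.bmm(_q, _q.permute(0, 2, 1))
_attn = torch.softmax(_energy.max(-1, keepdim=True)[0] - _energy, dim=-1)
assert Model(4)(_x, _attn).shape == _x.shape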
|
CAM_Calculate | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3m/c3mxgkf4weymbmbgydi4j4i6eycdz2flzbf3jce3eapte2aqyfta.py
# Topologically Sorted Source Nodes: [energy_new], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# energy_new => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %bmm), kwargs = {})
triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = tmp6 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [energy_new], Original ATen: [aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_sub_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
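# Reference (added sketch) for the three kernels above: triton_poi_fused_sub_0
# computes rowmax(energy) - energy, then the usual two-pass numerically
# stable softmax follows (exp of max-shifted values, row normalization).
# Because softmax is shift-invariant, the result equals softmax(-energy).
import torch
_e = torch.rand(4, 4, 4)
_ref = torch.softmax(_e.max(-1, keepdim=True)[0] - _e, dim=-1)
assert torch.allclose(_ref, torch.softmax(-_e, dim=-1))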
| import torch
import torch.nn as nn
import torch.utils.data
class CAM_Calculate(nn.Module):
""" Channel attention module"""
def __init__(self, in_dim):
super(CAM_Calculate, self).__init__()
        self.channel_in = in_dim
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""
inputs :
                x : input feature maps (B X C X H X W)
returns :
attention: B X C X C
"""
m_batchsize, C, _height, _width = x.size()
proj_query = x.contiguous().view(m_batchsize, C, -1)
proj_key = x.contiguous().view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy
) - energy
attention = self.softmax(energy_new)
return attention
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4}]
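# Added sanity check: every row of the returned attention map is a
# probability distribution over channels.
_attn = CAM_Calculate(in_dim=4)(torch.rand(4, 4, 4, 4))
assert _attn.shape == (4, 4, 4)
assert torch.allclose(_attn.sum(-1), torch.ones(4, 4))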
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + x2, xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = tmp6 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16,
1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0),
out=buf0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
return buf3,
class CAM_CalculateNew(nn.Module):
""" Channel attention module"""
def __init__(self, in_dim):
super(CAM_CalculateNew, self).__init__()
        self.channel_in = in_dim
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| jbcnrlz/san | CAM_Calculate | false | 3,848 | [
"MIT"
] | 0 | 1eab20f83d3c7dba5607e22d1c70768905b62b12 | https://github.com/jbcnrlz/san/tree/1eab20f83d3c7dba5607e22d1c70768905b62b12 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
""" Channel attention module"""
def __init__(self, in_dim):
super().__init__()
        self.channel_in = in_dim
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""
inputs :
                x : input feature maps (B X C X H X W)
returns :
attention: B X C X C
"""
m_batchsize, C, _height, _width = x.size()
proj_query = x.contiguous().view(m_batchsize, C, -1)
proj_key = x.contiguous().view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy
) - energy
attention = self.softmax(energy_new)
return attention
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
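# Added note: subtracting energy from its row maximum bounds the softmax
# input at zero and also inverts the ranking, so the largest raw
# similarity receives the smallest attention weight:
_e = torch.tensor([[1.0, 3.0, 2.0]])
_w = torch.softmax(_e.max(-1, keepdim=True)[0] - _e, dim=-1)
assert _w.argmax() == 0  # index of the smallest raw similarity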
|
FFN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/37/c37iajzamvg4r3s5ikb4y6kka2x3towdlz4bqoh3dx4uywvya2mb.py
# Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# relu => relu
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lf/clf7hs52i4bd5d3e73uio27ntyjfqmszkbsw6dta3r6rzgeftva3.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tr/ctrdeeo45yfmpbksxog7is2d6fd26mv2poki6u26emzhamo2zqxd.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_4 => add
# x_5 => clone, var_mean
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/px/cpxbmtafvoqnd5j3oyskd4thxpat5nbj25jgagf6an6xgvaf47sv.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_4 => add
# x_5 => add_1, add_2, clone, mul, mul_1, rsqrt, sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y3), ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + (4*y3)), tmp13, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4), (64, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf4, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_3.run(buf4, primals_1, buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(buf4, primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
del buf5
del buf6
del primals_7
return (buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16, 1), (16, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
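# Plain reference (added sketch) for the last two kernels above: one pass
# computes the per-row mean and biased variance of (conv_out + residual),
# the next normalizes with rsqrt(var + 1e-05) and applies the LayerNorm
# affine parameters.
import torch
_h = torch.rand(4, 4, 4)
_mu = _h.mean(-1, keepdim=True)
_var = _h.var(-1, unbiased=False, keepdim=True)
_ref = (_h - _mu) / torch.sqrt(_var + 1e-05)
assert torch.allclose(_ref, torch.nn.functional.layer_norm(_h, (4,)), atol=1e-6)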
| import torch
import torch.nn as nn
import torch as t
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
        :param w_init: str. nonlinearity name passed to nn.init.calculate_gain for Xavier initialization of the weight.
"""
super(Conv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFN(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super(FFN, self).__init__()
        self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1,
            w_init='relu')
        self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
self.dropout = nn.Dropout(p=0.1)
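        # Note: this dropout module is instantiated but never applied in forward().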
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_):
x = input_.transpose(1, 2)
x = self.w_2(t.relu(self.w_1(x)))
x = x.transpose(1, 2)
x = x + input_
x = self.layer_norm(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_hidden': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
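    # Computes per-(batch, position) LayerNorm statistics over the 4 hidden
    # channels of (conv output + residual input): tmp16 is the mean
    # (out_ptr0) and tmp28 the biased variance (out_ptr1); the reduction
    # over channels is fully unrolled below.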
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
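    # Applies the LayerNorm using the statistics from the previous kernel:
    # in_ptr2/in_ptr3 hold the mean/variance, in_ptr4/in_ptr5 the affine
    # weight and bias; computes (x + residual - mean) * rsqrt(var + 1e-05)
    # * weight + bias and writes it back in (batch, seq, hidden) layout.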
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask,
        eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4), (64, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(16)](buf4, primals_1,
buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf4,
primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf5
del buf6
del primals_7
return buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
:param w_init: str. weight inits with xavier initialization.
"""
super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
            kernel_size=kernel_size, stride=stride, padding=padding,
            dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(self.conv.weight,
            gain=nn.init.calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class FFNNew(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super(FFNNew, self).__init__()
        self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1,
            w_init='relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_0):
primals_2 = self.w_1.conv.weight
primals_3 = self.w_1.conv.bias
primals_4 = self.w_2.conv.weight
primals_5 = self.w_2.conv.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
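# Minimal usage sketch (an illustrative addition, not part of the generated
# file): assuming a CUDA device is available, FFNNew acts as a drop-in
# replacement for the eager FFN module it was compiled from.
def _ffn_smoke_test():
    if not torch.cuda.is_available():
        return
    module = FFNNew(num_hidden=4).cuda()
    out = module(torch.rand(4, 4, 4, device='cuda'))
    assert out.shape == (4, 4, 4)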
| kongziyue1234/mooc | FFN | false | 3,849 | [
"MIT"
] | 0 | 3b0c822dd55c1066cbc829137e6c424dcda5067e | https://github.com/kongziyue1234/mooc/tree/3b0c822dd55c1066cbc829137e6c424dcda5067e | import torch
import torch.nn as nn
import torch as t
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
:param w_init: str. weight inits with xavier initialization.
"""
super().__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
            kernel_size=kernel_size, stride=stride, padding=padding,
            dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(self.conv.weight,
            gain=nn.init.calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
class Model(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super().__init__()
        self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1,
            w_init='relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_):
x = input_.transpose(1, 2)
x = self.w_2(t.relu(self.w_1(x)))
x = x.transpose(1, 2)
x = x + input_
x = self.layer_norm(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
PairwiseRankingLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/eq/ceqhkwm2d735ddv4kryb45afnds5mdj2jfkqyfgb53xuu6ykceh3.py
# Topologically Sorted Source Nodes: [sub, add, clamp, cost_sent, sub_1, add_1, clamp_1, cost_img, loss], Original ATen: [aten.rsub, aten.add, aten.clamp, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# clamp => clamp_min
# clamp_1 => clamp_min_1
# cost_img => sum_2
# cost_sent => sum_1
# loss => add_2
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %arg0_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %arg2_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %arg3_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 0.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_add_clamp_rsub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr2 + (r0), None)
tmp12 = tl.load(in_ptr3 + (r0), None)
tmp1 = 4.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp11 = tmp1 - tmp10
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp13, tmp5)
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = tmp9 + tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, add, clamp, cost_sent, sub_1, add_1, clamp_1, cost_img, loss], Original ATen: [aten.rsub, aten.add, aten.clamp, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_rsub_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PairwiseRankingLoss(nn.Module):
"""
Pairwise ranking loss
"""
def __init__(self, margin):
super(PairwiseRankingLoss, self).__init__()
self.margin = margin
def forward(self, anchor1, anchor2, img_sentc, sent_imgc):
        cost_sent = torch.clamp(self.margin - anchor1 + img_sentc, min=0.0).sum()
        cost_img = torch.clamp(self.margin - anchor2 + sent_imgc, min=0.0).sum()
loss = cost_sent + cost_img
return loss
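# For reference: loss = sum(max(0, margin - anchor1 + img_sentc))
#                    + sum(max(0, margin - anchor2 + sent_imgc)),
# i.e. a bidirectional max-margin ranking objective.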
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, rnumel):
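    # Fuses both clamp-sum reductions of the ranking loss into a single pass
    # over the 256 elements: tmp1 hard-codes margin = 4.0 (the value from
    # get_init_inputs), and the two partial sums are added into one scalar.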
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + r0, None)
tmp12 = tl.load(in_ptr3 + r0, None)
tmp1 = 4.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp11 = tmp1 - tmp10
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp13, tmp5)
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = tmp9 + tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_rsub_sum_0[grid(1)](buf2, arg0_1, arg1_1,
arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf2,
class PairwiseRankingLossNew(nn.Module):
"""
Pairwise ranking loss
"""
def __init__(self, margin):
super(PairwiseRankingLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| ktodorov/uva-semantics-19 | PairwiseRankingLoss | false | 3,850 | [
"MIT"
] | 0 | c20e4f1d00f6693a8a46dd1d5576cfd3adced896 | https://github.com/ktodorov/uva-semantics-19/tree/c20e4f1d00f6693a8a46dd1d5576cfd3adced896 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Pairwise ranking loss
"""
def __init__(self, margin):
super().__init__()
self.margin = margin
def forward(self, anchor1, anchor2, img_sentc, sent_imgc):
        cost_sent = torch.clamp(self.margin - anchor1 + img_sentc, min=0.0).sum()
        cost_img = torch.clamp(self.margin - anchor2 + sent_imgc, min=0.0).sum()
loss = cost_sent + cost_img
return loss
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MeanEncoder | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/s2/cs2gfhtwtl27vutbe74iwhiq6w3cwhrdip3wxe356hgnu2svgem3.py
# Topologically Sorted Source Nodes: [sum_1, out], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# out => div
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [0]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %view), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, out], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BaseEncoder(nn.Module):
def __init__(self):
super(BaseEncoder, self).__init__()
self._input_dimensions = 0
@property
def input_dimensions(self):
return self._input_dimensions
class MeanEncoder(BaseEncoder):
def __init__(self):
super(MeanEncoder, self).__init__()
self._input_dimensions = 4 * 300
def forward(self, x, x_len):
out = torch.div(torch.sum(x, 0), x_len.float().view(-1, 1))
return out
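# Broadcasting note: for the sample inputs, torch.sum(x, 0) on a (4, 4)
# tensor has shape (4,), while x_len.float().view(-1, 1) on a (4, 4) x_len
# has shape (16, 1), so the division broadcasts to a (16, 4) result, which
# matches the (16, 4) buffer allocated in the compiled code.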
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
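    # Fully unrolled sum over the 4 rows of the (4, 4) input (the four
    # x0-indexed loads), divided element-wise by the flattened length tensor
    # indexed per output row (x1), yielding the broadcast (16, 4) result.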
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BaseEncoder(nn.Module):
def __init__(self):
super(BaseEncoder, self).__init__()
self._input_dimensions = 0
@property
def input_dimensions(self):
return self._input_dimensions
class MeanEncoderNew(BaseEncoder):
def __init__(self):
super(MeanEncoderNew, self).__init__()
self._input_dimensions = 4 * 300
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ktodorov/uva-semantics-19 | MeanEncoder | false | 3,851 | [
"MIT"
] | 0 | c20e4f1d00f6693a8a46dd1d5576cfd3adced896 | https://github.com/ktodorov/uva-semantics-19/tree/c20e4f1d00f6693a8a46dd1d5576cfd3adced896 | import torch
import torch.nn as nn
class BaseEncoder(nn.Module):
def __init__(self):
super().__init__()
self._input_dimensions = 0
@property
def input_dimensions(self):
return self._input_dimensions
class Model(BaseEncoder):
def __init__(self):
super().__init__()
self._input_dimensions = 4 * 300
def forward(self, x, x_len):
out = torch.div(torch.sum(x, 0), x_len.float().view(-1, 1))
return out
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
OneConv3d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ka/ckag6usfgndtl7sfphsoq73ejhb2hsruuuryrnl4axuh3zgzio2d.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 262144) % 2
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 3, 3, 3), (27, 27, 9, 3, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 64, 64, 64), (524288, 262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 2097152, grid=grid(2097152), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 1, 3, 3, 3), (27, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import logging
import torch
class OneConv3d(torch.nn.Module):
"""OneConv3d.
"""
def __init__(self, out_channels=2):
super().__init__()
        self.layer = torch.nn.Conv3d(in_channels=1,
            out_channels=out_channels, kernel_size=3, padding=1)
def forward(self, x):
logging.debug(f'x.shape={x.shape!r}')
out = self.layer(x)
logging.debug(f'out.shape={out.shape!r}')
return out
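# Shape sketch (illustrative): kernel_size=3 with padding=1 preserves the
# spatial extent, so a (N, 1, D, H, W) input maps to (N, out_channels, D, H, W),
# e.g. (4, 1, 64, 64, 64) -> (4, 2, 64, 64, 64) with the default out_channels=2.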
def get_inputs():
return [torch.rand([4, 1, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
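    # In-place bias add over the convolution output: x1 selects the output
    # channel ((index // 262144) % 2, one 64**3-voxel volume per channel),
    # matching the (4, 2, 64, 64, 64) buffer produced by the extern conv.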
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 262144 % 2
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 3, 3, 3), (27, 27, 9, 3, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096,
64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1,
            stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1),
            transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 64, 64, 64), (524288, 262144, 4096,
64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(2097152)](buf1, primals_2,
2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class OneConv3dNew(torch.nn.Module):
"""OneConv3d.
"""
def __init__(self, out_channels=2):
super().__init__()
        self.layer = torch.nn.Conv3d(in_channels=1,
            out_channels=out_channels, kernel_size=3, padding=1)
def forward(self, input_0):
primals_1 = self.layer.weight
primals_2 = self.layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| kirchhausenlab/incasem | OneConv3d | false | 3,852 | [
"BSD-3-Clause"
] | 0 | ee9e007c5c04571e547e2fb5af5e800bd2d2b435 | https://github.com/kirchhausenlab/incasem/tree/ee9e007c5c04571e547e2fb5af5e800bd2d2b435 | import logging
import torch
class Model(torch.nn.Module):
"""OneConv3d.
"""
def __init__(self, out_channels=2):
super().__init__()
self.layer = torch.nn.Conv3d(in_channels=1, out_channels=
out_channels, kernel_size=3, padding=1)
def forward(self, x):
logging.debug(f'x.shape={x.shape!r}')
out = self.layer(x)
logging.debug(f'out.shape={out.shape!r}')
return out
def get_inputs():
return [torch.rand([4, 1, 64, 64, 64])]
def get_init_inputs():
return []
|
encoder4 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/kn/cknyjwkwufnzzf4ya3scui55ownkmt5cdh3hggzwsfe3ch5fshzm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
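    # Appears to repack the (4, 3, 64, 64) NCHW input into a channels-last
    # layout (the store index y0 + 3 * x2 + 12288 * y1 makes the channel the
    # fastest-moving dimension), as consumed by the convolution kernels below.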
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5t/c5ta5b5nw4dp65565mg3k6wfbphtogtvx5v75up5yeibgiwkacek.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
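    # Like the near-identical _2 .. _7 kernels below, this appears to repack
    # a (out, in, 3, 3) conv weight so the input channel becomes the
    # fastest-moving dimension (channels-last weights), here for 64 filters
    # with 3 input channels.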
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
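
# ---------------------------------------------------------------------------
# Editor's note (hedged, not part of the generated output): triton_poi_fused_6
# and triton_poi_fused_7 above are pure layout kernels. They repack 3x3 conv
# weights from the default NCHW-contiguous strides, e.g. (2304, 9, 3, 1) for a
# (256, 256, 3, 3) tensor, into channels-last strides (2304, 1, 768, 256); the
# store `y0 + 256*x2 + 2304*y1` is exactly that permutation. A minimal
# eager-mode sketch of the same repacking (illustrative only, never called):
def _ref_repack_weight_channels_last(w):
    """Reorder a (C_out, C_in, kH, kW) weight into channels-last memory."""
    # For (256, 256, 3, 3) this yields strides (2304, 1, 768, 256), matching
    # the empty_strided_cuda buffers buf6..buf9 that `call` writes into below.
    return w.contiguous(memory_format=torch.channels_last)
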
# kernel path: runs/run_shard_7/inductor_cache/xc/cxcfm77ssn7kbjy2u5nrtv4hw4c565yr5hmv5uyyxhavwxuhcpam.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => convolution
# out_1 => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_8 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 52272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3) % 66
x2 = (xindex // 198) % 66
x3 = (xindex // 13068)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (12285 + x0 + ((-192)*(tl_math.abs((-63) + (tl_math.abs((-1) + x2))))) + ((-3)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1))))) + (12288*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
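
# Editor's note (hedged sketch, not generated): the convolution itself runs as
# an extern kernel with bias=None (see extern_kernels.convolution in `call`),
# so triton_poi_fused_convolution_reflection_pad2d_8 only adds the per-channel
# bias and applies a 1-pixel reflection pad. An eager equivalent of that fused
# epilogue, for a (N, C, H, W) conv output `out` (illustrative only):
def _ref_bias_reflect_pad(out, bias, pad=1, relu=False):
    y = out + bias.view(1, -1, 1, 1)  # bias add folded into the epilogue
    if relu:  # relu=True corresponds to kernels 9, 13 and 17 below
        y = torch.nn.functional.relu(y)
    return torch.nn.functional.pad(y, (pad,) * 4, mode='reflect')
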
# kernel path: runs/run_shard_7/inductor_cache/s7/cs7poqszye2ebmzd7wecdrerxmzb2mvnrdismv7aukobhi66kq2x.py
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_2 => convolution_1
# out_3 => relu
# out_4 => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_9 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 66
x2 = (xindex // 4224) % 66
x3 = (xindex // 278784)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (262080 + x0 + ((-4096)*(tl_math.abs((-63) + (tl_math.abs((-1) + x2))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1))))) + (262144*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
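
# Editor's note: triton_poi_fused_convolution_reflection_pad2d_relu_9 (and the
# structurally identical kernels 13 and 17 below) is the relu=True variant of
# `_ref_bias_reflect_pad` above: bias add, ReLU via
# triton_helpers.maximum(0, .), then reflection padding, in a single pass.
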
# kernel path: runs/run_shard_7/inductor_cache/y7/cy74ayecev2pcofz3fyu6lc473nqeaato7assx62kzcpdkdyzi7o.py
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_5 => convolution_2
# out_6 => relu_1
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
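
# Editor's note (hedged sketch): triton_poi_fused_convolution_relu_10 (and
# kernels 14 and 18 below) fuses the bias add and ReLU in place on the conv
# output; note `in_out_ptr0` here and the buffer reuse (`buf15 = buf14`) in
# `call`. An illustrative eager equivalent:
def _ref_bias_relu_(out, bias):
    # mutates the conv result in place, like the kernel's in_out_ptr0 store
    return out.add_(bias.view(1, -1, 1, 1)).relu_()
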
# kernel path: runs/run_shard_7/inductor_cache/rx/crxlkbr6satrqpxurojtni22byaxovvo72ydojpsrav6fl6aqki2.py
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_7 => getitem_1
# Graph fragment:
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_11 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = (xindex // 64) % 32
x2 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x2)), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x2)), None)
tmp7 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x2)), None)
tmp12 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x2)), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
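
# Editor's note: triton_poi_fused_max_pool2d_with_indices_11 (and kernels 15
# and 19 below) materializes only the *indices* of each 2x2, stride-2 pooling
# window, as int8 offsets 0..3 local to the window; the running max tmp16 is
# computed for the comparisons but never stored. This is Inductor's low-memory
# variant (see _low_memory_max_pool2d_with_offsets in the graph fragments).
# Eager max_pool2d with return_indices=True returns int64 flat indices instead,
# so the sketch below is behaviourally, not bit-wise, equivalent:
def _ref_pool_indices(x):
    _, idx = torch.nn.functional.max_pool2d(
        x, kernel_size=2, stride=2, return_indices=True)
    return idx  # int64 flat indices; the kernel stores int8 window offsets
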
# kernel path: runs/run_shard_7/inductor_cache/kq/ckqgavobs6wvx6foqntnpfzmwum2js237trqon7rx6e2whfxydk4.py
# Topologically Sorted Source Nodes: [out_7, out_8], Original ATen: [aten.max_pool2d_with_indices, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_7 => _low_memory_max_pool2d_with_offsets
# out_8 => _unsafe_index_4, _unsafe_index_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%getitem, [None, None, %sub_9, None]), kwargs = {})
# %_unsafe_index_5 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_4, [None, None, None, %sub_9]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_12(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 34
x2 = (xindex // 2176) % 34
x3 = (xindex // 73984)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (257920 + x0 + ((-8192)*(tl_math.abs((-31) + (tl_math.abs((-1) + x2))))) + ((-128)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (262144*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (257984 + x0 + ((-8192)*(tl_math.abs((-31) + (tl_math.abs((-1) + x2))))) + ((-128)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (262144*x3)), xmask)
tmp3 = tl.load(in_ptr0 + (262016 + x0 + ((-8192)*(tl_math.abs((-31) + (tl_math.abs((-1) + x2))))) + ((-128)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (262144*x3)), xmask)
tmp5 = tl.load(in_ptr0 + (262080 + x0 + ((-8192)*(tl_math.abs((-31) + (tl_math.abs((-1) + x2))))) + ((-128)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (262144*x3)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
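
# Editor's note (hedged sketch): this kernel (and kernels 16 and 20 below)
# recomputes the pooled *values* directly at the reflection-padded coordinates,
# fusing the 2x2 max-pool with the next layer's padding. Eager equivalent:
def _ref_pool_reflect_pad(x, pad=1):
    y = torch.nn.functional.max_pool2d(x, kernel_size=2, stride=2)
    return torch.nn.functional.pad(y, (pad,) * 4, mode='reflect')
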
# kernel path: runs/run_shard_7/inductor_cache/sn/csnvw3rjb7yxqnapqzbdhdtxhizhc5ldnten4bsolvaxq6vjtsoy.py
# Topologically Sorted Source Nodes: [out_9, out_10, out_11], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_10 => relu_2
# out_11 => _unsafe_index_6, _unsafe_index_7
# out_9 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_5, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_2, [None, None, %sub_9, None]), kwargs = {})
# %_unsafe_index_7 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_6, [None, None, None, %sub_9]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_13 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 591872
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 34
x2 = (xindex // 4352) % 34
x3 = (xindex // 147968)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (130944 + x0 + ((-4096)*(tl_math.abs((-31) + (tl_math.abs((-1) + x2))))) + ((-128)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (131072*x3)), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nx/cnxktby45un7uvjbdir4ageiyk7fw5gjiqfbtzwgzy4qgxog5njt.py
# Topologically Sorted Source Nodes: [out_12, out_13], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_12 => convolution_4
# out_13 => relu_3
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_7, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ah/cah4qlfmhgjrbbg4bzgappxrvmz4cpdjjbm2zhu3t5tcykg7i7no.py
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_14 => getitem_3
# Graph fragment:
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_15 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 16
x2 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (8192*x2)), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (8192*x2)), None)
tmp7 = tl.load(in_ptr0 + (4096 + x0 + (256*x1) + (8192*x2)), None)
tmp12 = tl.load(in_ptr0 + (4224 + x0 + (256*x1) + (8192*x2)), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cy/ccyr34pbi6eqe33kryc7anpqnp7ed3f6ivtkdsvjui625bphcwga.py
# Topologically Sorted Source Nodes: [out_14, out_15], Original ATen: [aten.max_pool2d_with_indices, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_14 => _low_memory_max_pool2d_with_offsets_1
# out_15 => _unsafe_index_8, _unsafe_index_9
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %_unsafe_index_8 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%getitem_2, [None, None, %sub_17, None]), kwargs = {})
# %_unsafe_index_9 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_8, [None, None, None, %sub_17]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_16 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_16(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 18
x2 = (xindex // 2304) % 18
x3 = (xindex // 41472)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (126720 + x0 + ((-8192)*(tl_math.abs((-15) + (tl_math.abs((-1) + x2))))) + ((-256)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (131072*x3)), None)
tmp1 = tl.load(in_ptr0 + (126848 + x0 + ((-8192)*(tl_math.abs((-15) + (tl_math.abs((-1) + x2))))) + ((-256)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (131072*x3)), None)
tmp3 = tl.load(in_ptr0 + (130816 + x0 + ((-8192)*(tl_math.abs((-15) + (tl_math.abs((-1) + x2))))) + ((-256)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (131072*x3)), None)
tmp5 = tl.load(in_ptr0 + (130944 + x0 + ((-8192)*(tl_math.abs((-15) + (tl_math.abs((-1) + x2))))) + ((-256)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (131072*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x4), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ci/ccipnp42jbmhsktotqlnyrzupgk4qhps4a4mjc6p2ckzvec2ncbr.py
# Topologically Sorted Source Nodes: [out_16, out_17, out_18], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_16 => convolution_5
# out_17 => relu_4
# out_18 => _unsafe_index_10, _unsafe_index_11
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_9, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %_unsafe_index_10 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %sub_17, None]), kwargs = {})
# %_unsafe_index_11 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_10, [None, None, None, %sub_17]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_17 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 331776
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 18
x2 = (xindex // 4608) % 18
x3 = (xindex // 82944)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (65280 + x0 + ((-4096)*(tl_math.abs((-15) + (tl_math.abs((-1) + x2))))) + ((-256)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (65536*x3)), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bv/cbvfxdm57fadbu6rxzoqxlhtygc74dkam5xfkym3yzky2aur42g4.py
# Topologically Sorted Source Nodes: [out_25, out_26], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_25 => convolution_8
# out_26 => relu_7
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_15, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
triton_poi_fused_convolution_relu_18 = async_compile.triton('triton_poi_fused_convolution_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nz/cnz2lj4tpa5v3pjd2xxbfyk4hkutzmx6dn3cbcej2bolpsrjgml4.py
# Topologically Sorted Source Nodes: [out_27], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_27 => getitem_5
# Graph fragment:
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_19 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 8
x2 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x2)), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x2)), None)
tmp7 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x2)), None)
tmp12 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x2)), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5j/c5jubjoeibvcx4uunpdvkea6ywamda4n2dmyz6whtayrl4vqhcky.py
# Topologically Sorted Source Nodes: [out_27, out_28], Original ATen: [aten.max_pool2d_with_indices, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_27 => _low_memory_max_pool2d_with_offsets_2
# out_28 => _unsafe_index_16, _unsafe_index_17
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_7, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %_unsafe_index_16 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%getitem_4, [None, None, %sub_33, None]), kwargs = {})
# %_unsafe_index_17 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_16, [None, None, None, %sub_33]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_20 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_20(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 102400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 10
x2 = (xindex // 2560) % 10
x3 = (xindex // 25600)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (60928 + x0 + ((-8192)*(tl_math.abs((-7) + (tl_math.abs((-1) + x2))))) + ((-512)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (65536*x3)), None)
tmp1 = tl.load(in_ptr0 + (61184 + x0 + ((-8192)*(tl_math.abs((-7) + (tl_math.abs((-1) + x2))))) + ((-512)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (65536*x3)), None)
tmp3 = tl.load(in_ptr0 + (65024 + x0 + ((-8192)*(tl_math.abs((-7) + (tl_math.abs((-1) + x2))))) + ((-512)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (65536*x3)), None)
tmp5 = tl.load(in_ptr0 + (65280 + x0 + ((-8192)*(tl_math.abs((-7) + (tl_math.abs((-1) + x2))))) + ((-512)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (65536*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x4), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6e/c6ebhusmqfeayps2q55d7t23ye4uthdjsajiyeto65pxdtx3rhkq.py
# Topologically Sorted Source Nodes: [out_29, out_30], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_29 => convolution_9
# out_30 => relu_8
# Graph fragment:
# %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_17, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_8, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_21 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 64], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = (yindex // 512)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (32768*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (64*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (512*x2) + (32768*y1)), tmp6, xmask)
''', device_str='cuda')
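
# Editor's note: triton_poi_fused_convolution_relu_threshold_backward_21 does
# three things in one pass over the final conv output: adds the bias, applies
# ReLU, and saves the boolean mask relu(out) <= 0 that aten.threshold_backward
# uses to zero gradients in the ReLU backward. Its store indexing (`x2 + 64*y3`)
# also converts the activation from the channels-last compute layout back to
# contiguous NCHW, while the mask stays channels-last. A hedged eager sketch:
def _ref_relu_with_backward_mask(out, bias):
    y = torch.nn.functional.relu(out + bias.view(1, -1, 1, 1))
    return y.contiguous(), y <= 0  # activation plus mask for threshold_backward
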
# kernel path: runs/run_shard_7/inductor_cache/ca/ccas2qbjni3hucw6abdbbofmsk4dbrd5kd3ujgvfpxuonolhk7p5.py
# Topologically Sorted Source Nodes: [out_22, out_23], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_22 => convolution_7
# out_23 => relu_6
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_13, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %le_38 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_22 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
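
# Editor's note: this kernel and triton_poi_fused_convolution_relu_threshold_backward_23/_24
# below are mask-only variants of the pattern above: they recompute
# relu(conv + bias) at 256, 128 and 64 channels respectively and store just the
# `<= 0` byte mask for the corresponding ReLU backward, without
# re-materializing the activation itself.
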
# kernel path: runs/run_shard_7/inductor_cache/p4/cp4a5jhmxqgur6uipmtn74cebd6falyodaqz5vedteff7gd7yayt.py
# Topologically Sorted Source Nodes: [out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_10 => relu_2
# out_9 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_5, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_114 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_23 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_23(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uf/cufs4qt2zzbiuajkuigk5cyac66gvn7ceoafrfalxcr5pk6qk45w.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_2 => convolution_1
# out_3 => relu
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_152 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_24 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_24(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
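
# Editor's note (summary, not generated output): `call` below drives a VGG-style
# encoder over a (4, 3, 64, 64) input. It first repacks the input and every
# conv weight into channels-last buffers (triton_poi_fused_0..7), then
# alternates extern convolutions, dispatched through aten.convolution with
# bias=None, with the fused Triton epilogues defined above: bias + reflection
# pad, bias + ReLU (+ pad), 2x2 max-pool values and int8 indices, and finally
# the bias + ReLU + threshold_backward masks that are kept for autograd.
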
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args
args.clear()
assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (3, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (256, ), (1, ))
assert_size_stride(primals_20, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf1, 192, 9, grid=grid(192, 9), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_6, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_6
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_8, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_8
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_10, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_10
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_12, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_12
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_14, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_14
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_16, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_16
buf8 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_18, buf8, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_18
buf9 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_7.run(primals_20, buf9, 131072, 9, grid=grid(131072, 9), stream=stream0)
del primals_20
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 3, 64, 64), (12288, 1, 192, 3))
buf11 = empty_strided_cuda((4, 3, 66, 66), (13068, 1, 198, 3), torch.float32)
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_8.run(buf10, primals_2, buf11, 52272, grid=grid(52272), stream=stream0)
del buf10
del primals_2
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf13 = empty_strided_cuda((4, 64, 66, 66), (278784, 1, 4224, 64), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_9.run(buf12, primals_5, buf13, 1115136, grid=grid(1115136), stream=stream0)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf15, primals_7, 1048576, grid=grid(1048576), stream=stream0)
del primals_7
buf16 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8)
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_11.run(buf15, buf16, 262144, grid=grid(262144), stream=stream0)
buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1, 2176, 64), torch.float32)
# Topologically Sorted Source Nodes: [out_7, out_8], Original ATen: [aten.max_pool2d_with_indices, aten.reflection_pad2d]
triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_12.run(buf15, buf17, 295936, grid=grid(295936), stream=stream0)
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf19 = empty_strided_cuda((4, 128, 34, 34), (147968, 1, 4352, 128), torch.float32)
# Topologically Sorted Source Nodes: [out_9, out_10, out_11], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_13.run(buf18, primals_9, buf19, 591872, grid=grid(591872), stream=stream0)
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [out_12, out_13], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf21, primals_11, 524288, grid=grid(524288), stream=stream0)
del primals_11
buf22 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8)
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_15.run(buf21, buf22, 131072, grid=grid(131072), stream=stream0)
buf23 = empty_strided_cuda((4, 128, 18, 18), (41472, 1, 2304, 128), torch.float32)
# Topologically Sorted Source Nodes: [out_14, out_15], Original ATen: [aten.max_pool2d_with_indices, aten.reflection_pad2d]
triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_16.run(buf21, buf23, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_16], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf25 = empty_strided_cuda((4, 256, 18, 18), (82944, 1, 4608, 256), torch.float32)
# Topologically Sorted Source Nodes: [out_16, out_17, out_18], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_17.run(buf24, primals_13, buf25, 331776, grid=grid(331776), stream=stream0)
# Topologically Sorted Source Nodes: [out_19], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf27 = empty_strided_cuda((4, 256, 18, 18), (82944, 1, 4608, 256), torch.float32)
# Topologically Sorted Source Nodes: [out_19, out_20, out_21], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_17.run(buf26, primals_15, buf27, 331776, grid=grid(331776), stream=stream0)
# Topologically Sorted Source Nodes: [out_22], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, buf7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf29 = empty_strided_cuda((4, 256, 18, 18), (82944, 1, 4608, 256), torch.float32)
# Topologically Sorted Source Nodes: [out_22, out_23, out_24], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_17.run(buf28, primals_17, buf29, 331776, grid=grid(331776), stream=stream0)
# Topologically Sorted Source Nodes: [out_25], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [out_25, out_26], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_18.run(buf31, primals_19, 262144, grid=grid(262144), stream=stream0)
del primals_19
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8)
# Topologically Sorted Source Nodes: [out_27], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_19.run(buf31, buf32, 65536, grid=grid(65536), stream=stream0)
buf33 = empty_strided_cuda((4, 256, 10, 10), (25600, 1, 2560, 256), torch.float32)
# Topologically Sorted Source Nodes: [out_27, out_28], Original ATen: [aten.max_pool2d_with_indices, aten.reflection_pad2d]
triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_20.run(buf31, buf33, 102400, grid=grid(102400), stream=stream0)
# Topologically Sorted Source Nodes: [out_29], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf33, buf9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf35 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32)
buf36 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool)
# Topologically Sorted Source Nodes: [out_29, out_30], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_21.run(buf34, primals_21, buf35, buf36, 2048, 64, grid=grid(2048, 64), stream=stream0)
del buf34
del primals_21
buf37 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.bool)
# Topologically Sorted Source Nodes: [out_22, out_23], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_22.run(buf28, primals_17, buf37, 262144, grid=grid(262144), stream=stream0)
del buf28
del primals_17
buf38 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.bool)
# Topologically Sorted Source Nodes: [out_19, out_20], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_22.run(buf26, primals_15, buf38, 262144, grid=grid(262144), stream=stream0)
del buf26
del primals_15
buf39 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.bool)
# Topologically Sorted Source Nodes: [out_16, out_17], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_22.run(buf24, primals_13, buf39, 262144, grid=grid(262144), stream=stream0)
del buf24
del primals_13
buf40 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.bool)
# Topologically Sorted Source Nodes: [out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_23.run(buf18, primals_9, buf40, 524288, grid=grid(524288), stream=stream0)
del buf18
del primals_9
buf41 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_24.run(buf12, primals_5, buf41, 1048576, grid=grid(1048576), stream=stream0)
del buf12
del primals_5
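    # buf35 is the encoder output (relu10); the remaining tensors are inputs,
    # saved activations, and relu masks retained for the backward pass.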
return (buf35, primals_1, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf31, buf32, buf33, buf36, buf37, buf38, buf39, buf40, buf41, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21])
return print_performance(fn, times=times, repeat=repeat)
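# Running this file directly benchmarks the compiled forward pass on random
# strided inputs via torch._inductor's wrapper-benchmark harness.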
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class encoder4(nn.Module):
def __init__(self):
super(encoder4, self).__init__()
self.conv1 = nn.Conv2d(3, 3, 1, 1, 0)
self.reflecPad1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv2 = nn.Conv2d(3, 64, 3, 1, 0)
self.relu2 = nn.ReLU(inplace=True)
self.reflecPad3 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu3 = nn.ReLU(inplace=True)
self.maxPool = nn.MaxPool2d(kernel_size=2, stride=2)
self.reflecPad4 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv4 = nn.Conv2d(64, 128, 3, 1, 0)
self.relu4 = nn.ReLU(inplace=True)
self.reflecPad5 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv5 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu5 = nn.ReLU(inplace=True)
self.maxPool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.reflecPad6 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv6 = nn.Conv2d(128, 256, 3, 1, 0)
self.relu6 = nn.ReLU(inplace=True)
self.reflecPad7 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv7 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu7 = nn.ReLU(inplace=True)
self.reflecPad8 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv8 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu8 = nn.ReLU(inplace=True)
self.reflecPad9 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv9 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu9 = nn.ReLU(inplace=True)
self.maxPool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.reflecPad10 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv10 = nn.Conv2d(256, 512, 3, 1, 0)
self.relu10 = nn.ReLU(inplace=True)
def forward(self, x, sF=None, matrix11=None, matrix21=None, matrix31=None):
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
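        # reflecPad7 is reused here and below in place of reflecPad3/reflecPad5;
        # every pad is ReflectionPad2d((1, 1, 1, 1)), so the output is unchanged.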
out = self.reflecPad7(out)
out = self.conv3(out)
out = self.relu3(out)
out = self.maxPool(out)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.reflecPad7(out)
out = self.conv5(out)
out = self.relu5(out)
out = self.maxPool2(out)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
out = self.reflecPad7(out)
out = self.conv7(out)
out = self.relu7(out)
out = self.reflecPad8(out)
out = self.conv8(out)
out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
out = self.relu9(out)
out = self.maxPool3(out)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
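# Minimal smoke test (a sketch): the encoder maps a (4, 3, 64, 64) input to a
# (4, 512, 8, 8) feature map after three 2x max-pool stages.
#   enc = encoder4()
#   feats = enc(*get_inputs())  # feats.shape == torch.Size([4, 512, 8, 8])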
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_8(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 52272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 66
x2 = xindex // 198 % 66
x3 = xindex // 13068
x4 = xindex
tmp0 = tl.load(in_ptr0 + (12285 + x0 + -192 * tl_math.abs(-63 + tl_math
.abs(-1 + x2)) + -3 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) +
12288 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 66
x2 = xindex // 4224 % 66
x3 = xindex // 278784
x4 = xindex
tmp0 = tl.load(in_ptr0 + (262080 + x0 + -4096 * tl_math.abs(-63 +
tl_math.abs(-1 + x2)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1
)) + 262144 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 32
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
tmp7 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
tmp12 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
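    # the final max value is computed but discarded; this kernel stores only
    # the int8 argmax indices used by max-pool backward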
triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_12(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 34
x2 = xindex // 2176 % 34
x3 = xindex // 73984
x4 = xindex
tmp0 = tl.load(in_ptr0 + (257920 + x0 + -8192 * tl_math.abs(-31 +
tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 +
x1)) + 262144 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (257984 + x0 + -8192 * tl_math.abs(-31 +
tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 +
x1)) + 262144 * x3), xmask)
tmp3 = tl.load(in_ptr0 + (262016 + x0 + -8192 * tl_math.abs(-31 +
tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 +
x1)) + 262144 * x3), xmask)
tmp5 = tl.load(in_ptr0 + (262080 + x0 + -8192 * tl_math.abs(-31 +
tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 +
x1)) + 262144 * x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_13(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 34
x2 = xindex // 4352 % 34
x3 = xindex // 147968
x4 = xindex
tmp0 = tl.load(in_ptr0 + (130944 + x0 + -4096 * tl_math.abs(-31 +
tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 +
x1)) + 131072 * x3), None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x4, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 16
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None)
tmp7 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None)
tmp12 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
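    # max value computed but unused; only the argmax indices are stored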
triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_16(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 18
x2 = xindex // 2304 % 18
x3 = xindex // 41472
x4 = xindex
tmp0 = tl.load(in_ptr0 + (126720 + x0 + -8192 * tl_math.abs(-15 +
tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 +
x1)) + 131072 * x3), None)
tmp1 = tl.load(in_ptr0 + (126848 + x0 + -8192 * tl_math.abs(-15 +
tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 +
x1)) + 131072 * x3), None)
tmp3 = tl.load(in_ptr0 + (130816 + x0 + -8192 * tl_math.abs(-15 +
tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 +
x1)) + 131072 * x3), None)
tmp5 = tl.load(in_ptr0 + (130944 + x0 + -8192 * tl_math.abs(-15 +
tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 +
x1)) + 131072 * x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x4, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_17(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 18
x2 = xindex // 4608 % 18
x3 = xindex // 82944
x4 = xindex
tmp0 = tl.load(in_ptr0 + (65280 + x0 + -4096 * tl_math.abs(-15 +
tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 +
x1)) + 65536 * x3), None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x4, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 8
x2 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None)
tmp7 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None)
tmp12 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
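    # max value computed but unused; only the argmax indices are stored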
triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_20(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 10
x2 = xindex // 2560 % 10
x3 = xindex // 25600
x4 = xindex
tmp0 = tl.load(in_ptr0 + (60928 + x0 + -8192 * tl_math.abs(-7 + tl_math
.abs(-1 + x2)) + -512 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) +
65536 * x3), None)
tmp1 = tl.load(in_ptr0 + (61184 + x0 + -8192 * tl_math.abs(-7 + tl_math
.abs(-1 + x2)) + -512 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) +
65536 * x3), None)
tmp3 = tl.load(in_ptr0 + (65024 + x0 + -8192 * tl_math.abs(-7 + tl_math
.abs(-1 + x2)) + -512 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) +
65536 * x3), None)
tmp5 = tl.load(in_ptr0 + (65280 + x0 + -8192 * tl_math.abs(-7 + tl_math
.abs(-1 + x2)) + -512 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) +
65536 * x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x4, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = yindex // 512
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_23(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_24(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (3,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_3, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
triton_poi_fused_1[grid(192, 9)](primals_4, buf1, 192, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_2[grid(4096, 9)](primals_6, buf2, 4096, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_8, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 9)](primals_10, buf4, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_5[grid(32768, 9)](primals_12, buf5, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_14, buf6, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_16, buf7, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf8 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_6[grid(65536, 9)](primals_18, buf8, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_18
buf9 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_7[grid(131072, 9)](primals_20, buf9, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf10 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 3, 64, 64), (12288, 1, 192, 3))
buf11 = empty_strided_cuda((4, 3, 66, 66), (13068, 1, 198, 3),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_8[grid(52272)](buf10,
primals_2, buf11, 52272, XBLOCK=512, num_warps=4, num_stages=1)
del buf10
del primals_2
buf12 = extern_kernels.convolution(buf11, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf13 = empty_strided_cuda((4, 64, 66, 66), (278784, 1, 4224, 64),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(1115136)](
buf12, primals_5, buf13, 1115136, XBLOCK=1024, num_warps=4,
num_stages=1)
buf14 = extern_kernels.convolution(buf13, buf2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_10[grid(1048576)](buf15,
primals_7, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf16 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_11[grid(262144)](buf15,
buf16, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1, 2176, 64),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_12[grid(
295936)](buf15, buf17, 295936, XBLOCK=512, num_warps=8,
num_stages=1)
buf18 = extern_kernels.convolution(buf17, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf19 = empty_strided_cuda((4, 128, 34, 34), (147968, 1, 4352, 128),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_13[grid(591872)](
buf18, primals_9, buf19, 591872, XBLOCK=1024, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf19, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_14[grid(524288)](buf21,
primals_11, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf22 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_15[grid(131072)](buf21,
buf22, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf23 = empty_strided_cuda((4, 128, 18, 18), (41472, 1, 2304, 128),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_16[grid(
165888)](buf21, buf23, 165888, XBLOCK=1024, num_warps=4,
num_stages=1)
buf24 = extern_kernels.convolution(buf23, buf5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf25 = empty_strided_cuda((4, 256, 18, 18), (82944, 1, 4608, 256),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_17[grid(331776)](
buf24, primals_13, buf25, 331776, XBLOCK=1024, num_warps=4,
num_stages=1)
buf26 = extern_kernels.convolution(buf25, buf6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf27 = empty_strided_cuda((4, 256, 18, 18), (82944, 1, 4608, 256),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_17[grid(331776)](
buf26, primals_15, buf27, 331776, XBLOCK=1024, num_warps=4,
num_stages=1)
buf28 = extern_kernels.convolution(buf27, buf7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf29 = empty_strided_cuda((4, 256, 18, 18), (82944, 1, 4608, 256),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_17[grid(331776)](
buf28, primals_17, buf29, 331776, XBLOCK=1024, num_warps=4,
num_stages=1)
buf30 = extern_kernels.convolution(buf29, buf8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_18[grid(262144)](buf31,
primals_19, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_19[grid(65536)](buf31,
buf32, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf33 = empty_strided_cuda((4, 256, 10, 10), (25600, 1, 2560, 256),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_20[grid(
102400)](buf31, buf33, 102400, XBLOCK=512, num_warps=8,
num_stages=1)
buf34 = extern_kernels.convolution(buf33, buf9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf35 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.float32)
buf36 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_21[grid(2048, 64)
](buf34, primals_21, buf35, buf36, 2048, 64, XBLOCK=32, YBLOCK=
32, num_warps=4, num_stages=1)
del buf34
del primals_21
buf37 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_22[grid(262144)](
buf28, primals_17, buf37, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf28
del primals_17
buf38 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_22[grid(262144)](
buf26, primals_15, buf38, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf26
del primals_15
buf39 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_22[grid(262144)](
buf24, primals_13, buf39, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf24
del primals_13
buf40 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_23[grid(524288)](
buf18, primals_9, buf40, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf18
del primals_9
buf41 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_24[grid(1048576)](
buf12, primals_5, buf41, 1048576, XBLOCK=512, num_warps=8,
num_stages=1)
del buf12
del primals_5
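    # The first returned tensor (buf35) is the network output;
    # encoder4New.forward returns output[0] accordingly.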
return (buf35, primals_1, buf0, buf1, buf2, buf3, buf4, buf5, buf6,
buf7, buf8, buf9, buf11, buf13, buf15, buf16, buf17, buf19, buf21,
buf22, buf23, buf25, buf27, buf29, buf31, buf32, buf33, buf36,
buf37, buf38, buf39, buf40, buf41)
class encoder4New(nn.Module):
def __init__(self):
super(encoder4New, self).__init__()
self.conv1 = nn.Conv2d(3, 3, 1, 1, 0)
self.reflecPad1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv2 = nn.Conv2d(3, 64, 3, 1, 0)
self.relu2 = nn.ReLU(inplace=True)
self.reflecPad3 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu3 = nn.ReLU(inplace=True)
self.maxPool = nn.MaxPool2d(kernel_size=2, stride=2)
self.reflecPad4 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv4 = nn.Conv2d(64, 128, 3, 1, 0)
self.relu4 = nn.ReLU(inplace=True)
self.reflecPad5 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv5 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu5 = nn.ReLU(inplace=True)
self.maxPool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.reflecPad6 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv6 = nn.Conv2d(128, 256, 3, 1, 0)
self.relu6 = nn.ReLU(inplace=True)
self.reflecPad7 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv7 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu7 = nn.ReLU(inplace=True)
self.reflecPad8 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv8 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu8 = nn.ReLU(inplace=True)
self.reflecPad9 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv9 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu9 = nn.ReLU(inplace=True)
self.maxPool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.reflecPad10 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv10 = nn.Conv2d(256, 512, 3, 1, 0)
self.relu10 = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.conv6.weight
primals_13 = self.conv6.bias
primals_14 = self.conv7.weight
primals_15 = self.conv7.bias
primals_16 = self.conv8.weight
primals_17 = self.conv8.bias
primals_18 = self.conv9.weight
primals_19 = self.conv9.bias
primals_20 = self.conv10.weight
primals_21 = self.conv10.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
| kamieen03/style-transfer-server | encoder4 | false | 3,853 | ["BSD-2-Clause"] | 0 | 91727ec62080215a0b870ce043faf0657137b84b | https://github.com/kamieen03/style-transfer-server/tree/91727ec62080215a0b870ce043faf0657137b84b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 1, 1, 0)
self.reflecPad1 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv2 = nn.Conv2d(3, 64, 3, 1, 0)
self.relu2 = nn.ReLU(inplace=True)
self.reflecPad3 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
self.relu3 = nn.ReLU(inplace=True)
self.maxPool = nn.MaxPool2d(kernel_size=2, stride=2)
self.reflecPad4 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv4 = nn.Conv2d(64, 128, 3, 1, 0)
self.relu4 = nn.ReLU(inplace=True)
self.reflecPad5 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv5 = nn.Conv2d(128, 128, 3, 1, 0)
self.relu5 = nn.ReLU(inplace=True)
self.maxPool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.reflecPad6 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv6 = nn.Conv2d(128, 256, 3, 1, 0)
self.relu6 = nn.ReLU(inplace=True)
self.reflecPad7 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv7 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu7 = nn.ReLU(inplace=True)
self.reflecPad8 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv8 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu8 = nn.ReLU(inplace=True)
self.reflecPad9 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv9 = nn.Conv2d(256, 256, 3, 1, 0)
self.relu9 = nn.ReLU(inplace=True)
self.maxPool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.reflecPad10 = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv10 = nn.Conv2d(256, 512, 3, 1, 0)
self.relu10 = nn.ReLU(inplace=True)
def forward(self, x, sF=None, matrix11=None, matrix21=None, matrix31=None):
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
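        # reflecPad7 is reused below in place of reflecPad3/reflecPad5; all pads
        # are ReflectionPad2d((1, 1, 1, 1)), so the result is identical.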
out = self.reflecPad7(out)
out = self.conv3(out)
out = self.relu3(out)
out = self.maxPool(out)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.reflecPad7(out)
out = self.conv5(out)
out = self.relu5(out)
out = self.maxPool2(out)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
out = self.reflecPad7(out)
out = self.conv7(out)
out = self.relu7(out)
out = self.reflecPad8(out)
out = self.conv8(out)
out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
out = self.relu9(out)
out = self.maxPool3(out)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
AlexNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ix/cixvadjgd5cvpvk2ts6iikxzkjfuw427jazbbrdgohkup7y7wbgi.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
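    # repacks a (192, 64, 5, 5) conv weight into channels-last layout:
    # ynumel = 192 * 64 = 12288, xnumel = 5 * 5 = 25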
ynumel = 12288
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ld/clde4b7buriln3itqc2hfrrtncfdf3qkcbtrvjkxbqlumot7pbs7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 73728
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (1728*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/p5/cp52po6uaj4bcdvinq444ohpeuilqkinxwc7z5zkf2fiwsqlw3ij.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 98304
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = (yindex // 384)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (384*x2) + (3456*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rw/crwxihz2xdn6vknnrjr5if7hyms65a7dv6ub7vsls72ck5xfuwfz.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/e2/ce2uudcvbgtkh3eqlc2hd7jhlpyozlyjzfvrt7va6lwbnuuya25f.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 256
xnumel = 225
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (225*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + (64*x2) + (14400*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
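# In eager terms the kernel above computes roughly
# `relu(conv_out + bias[None, :, None, None])` while copying the activation
# from contiguous NCHW strides into a channels-last layout (a sketch of the
# semantics, not a literal restatement).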
# kernel path: runs/run_shard_7/inductor_cache/ur/currihw6cbj77blwbzzhjvr5qjahvt3pb7w6xewlivga7mmvhktm.py
# Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1
# x_1 => relu_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu, [3, 3], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 7
x2 = (xindex // 448) % 7
x3 = (xindex // 3136)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (1920*x2) + (14400*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (1920*x2) + (14400*x3)), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + (128*x1) + (1920*x2) + (14400*x3)), xmask)
tmp5 = tl.load(in_ptr0 + (960 + x0 + (128*x1) + (1920*x2) + (14400*x3)), xmask)
tmp7 = tl.load(in_ptr0 + (1024 + x0 + (128*x1) + (1920*x2) + (14400*x3)), xmask)
tmp9 = tl.load(in_ptr0 + (1088 + x0 + (128*x1) + (1920*x2) + (14400*x3)), xmask)
tmp11 = tl.load(in_ptr0 + (1920 + x0 + (128*x1) + (1920*x2) + (14400*x3)), xmask)
tmp13 = tl.load(in_ptr0 + (1984 + x0 + (128*x1) + (1920*x2) + (14400*x3)), xmask)
tmp15 = tl.load(in_ptr0 + (2048 + x0 + (128*x1) + (1920*x2) + (14400*x3)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tl.full([1], 0, tl.int32)
tmp43 = triton_helpers.maximum(tmp42, tmp16)
tl.store(out_ptr0 + (x4), tmp41, xmask)
tl.store(in_out_ptr0 + (x4), tmp43, xmask)
''', device_str='cuda')
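# The int8 values written to out_ptr0 above encode, per output element, which
# of the nine 3x3-window positions (0..8, row-major) held the maximum -- the
# low-memory form of max_pool2d_with_indices' indices output, kept for the
# backward pass. The final maximum(0, ...) is the fused ReLU; it is a no-op
# here because the pooled input is already non-negative.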
# kernel path: runs/run_shard_7/inductor_cache/es/cesojqky2hzg4iahlzl7ylujui3xci3o5v3erzeem7labgginwp7.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_2 => relu_2
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_4, %primals_5, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 37632
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hf/chfrnhvdhtegpiwpycesueprzedfmuv4a7q7jx4qhuxrk5vrgvy4.py
# Topologically Sorted Source Nodes: [max_pool2d_1, x_3], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# x_3 => relu_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [3, 3], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 192
x1 = (xindex // 192) % 3
x2 = (xindex // 576) % 3
x3 = (xindex // 1728)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (384*x1) + (2688*x2) + (9408*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (192 + x0 + (384*x1) + (2688*x2) + (9408*x3)), xmask)
tmp3 = tl.load(in_ptr0 + (384 + x0 + (384*x1) + (2688*x2) + (9408*x3)), xmask)
tmp5 = tl.load(in_ptr0 + (1344 + x0 + (384*x1) + (2688*x2) + (9408*x3)), xmask)
tmp7 = tl.load(in_ptr0 + (1536 + x0 + (384*x1) + (2688*x2) + (9408*x3)), xmask)
tmp9 = tl.load(in_ptr0 + (1728 + x0 + (384*x1) + (2688*x2) + (9408*x3)), xmask)
tmp11 = tl.load(in_ptr0 + (2688 + x0 + (384*x1) + (2688*x2) + (9408*x3)), xmask)
tmp13 = tl.load(in_ptr0 + (2880 + x0 + (384*x1) + (2688*x2) + (9408*x3)), xmask)
tmp15 = tl.load(in_ptr0 + (3072 + x0 + (384*x1) + (2688*x2) + (9408*x3)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tl.full([1], 0, tl.int32)
tmp43 = triton_helpers.maximum(tmp42, tmp16)
tl.store(out_ptr0 + (x4), tmp41, xmask)
tl.store(in_out_ptr0 + (x4), tmp43, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/p5/cp55ftpyktkskbllbnyu5jmpb3nidi4ts6rp6ahrdwcek6keqtch.py
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_4 => relu_4
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 13824
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/r7/cr7uq5ur2j4jwzfgtl7exszppe4m5worsfpyxi3aykaqmannjwdx.py
# Topologically Sorted Source Nodes: [conv2d_3, x_5], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_5 => relu_5
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rq/crqdzonsfsxhilmt766d6o5rgiuvdyvn6wsso2uwektg5i752j4j.py
# Topologically Sorted Source Nodes: [max_pool2d_2, x_7], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# max_pool2d_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# x_7 => relu_7
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_6, [3, 3], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_4,), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_10(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = (xindex // 256)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (2304*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (2304*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (512 + x0 + (2304*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (768 + x0 + (2304*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (1024 + x0 + (2304*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (1280 + x0 + (2304*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (1536 + x0 + (2304*x1)), xmask)
tmp13 = tl.load(in_ptr0 + (1792 + x0 + (2304*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (2048 + x0 + (2304*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tl.full([1], 0, tl.int32)
tmp43 = triton_helpers.maximum(tmp42, tmp16)
tl.store(out_ptr0 + (x2), tmp41, xmask)
tl.store(in_out_ptr0 + (x2), tmp43, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ka/ckawyhrrmtnmabd72ve7vfay5xhuu534xw3spak4ktpxro7v2qpm.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_10 => relu_8
# Graph fragment:
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view,), kwargs = {})
triton_poi_fused_relu_11 = async_compile.triton('triton_poi_fused_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 9216
x2 = xindex
tmp0 = (((x0 // 6) % 6) // 6)
tmp1 = 1 + (((x0 // 6) % 6) // 6)
tmp2 = tmp0 < tmp1
tmp3 = ((x0 % 6) // 6)
tmp4 = 1 + ((x0 % 6) // 6)
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + ((x2 // 36)), tmp6, eviction_policy='evict_last', other=0.0)
tmp8 = 1.0
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp6, tmp8, tmp9)
tmp11 = tmp7 / tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp13, None)
''', device_str='cuda')
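# Reading the indexing above: each of the 256 channel values of the 1x1 input
# is broadcast into a 6x6 grid (AdaptiveAvgPool2d over a single-element
# window, hence the division by 1.0), flattened to 9216 features, and passed
# through ReLU; the dropout between these steps folds away at inference.
# Offered as an interpretation of the generated code, not a guarantee.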
# kernel path: runs/run_shard_7/inductor_cache/4z/c4zaj5yzbaoz3qiwdwbyavbi6c742vxo2yfcgabmkc3cjy5zbxxw.py
# Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_11 => relu_9
# x_12 => relu_10
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_13), kwargs = {})
# %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%relu_9,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_9, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_12 = async_compile.triton('triton_poi_fused_relu_threshold_backward_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(out_ptr0 + (x2), tmp5, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
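# Besides the doubled ReLU (relu_10 over relu_9 is idempotent; it appears
# because the surrounding dropout folds away at inference), the kernel above
# writes the boolean mask `relu_9 <= 0` (buf29), which the backward pass uses
# to zero gradients where the activation was clamped (threshold_backward).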
# kernel path: runs/run_shard_7/inductor_cache/zw/czwvzs5m5d43fg5nljxtnvbzrdwlgwmo36jgn2z3ah7loe5hm2nr.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_13 => relu_11
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {})
# %relu_11 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_13 = async_compile.triton('triton_poi_fused_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 11, 11), (121, 121, 11, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (192, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (192, ), (1, ))
assert_size_stride(primals_6, (384, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_7, (384, ), (1, ))
assert_size_stride(primals_8, (256, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (4096, 9216), (9216, 1))
assert_size_stride(primals_13, (4096, ), (1, ))
assert_size_stride(primals_14, (4096, 4096), (4096, 1))
assert_size_stride(primals_15, (4096, ), (1, ))
assert_size_stride(primals_16, (4, 4096), (4096, 1))
assert_size_stride(primals_17, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((192, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 12288, 25, grid=grid(12288, 25), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((384, 192, 3, 3), (1728, 1, 576, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 73728, 9, grid=grid(73728, 9), stream=stream0)
del primals_6
buf2 = empty_strided_cuda((256, 384, 3, 3), (3456, 1, 1152, 384), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_8, buf2, 98304, 9, grid=grid(98304, 9), stream=stream0)
del primals_8
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_10, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_10
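        # buf0-buf3 now hold the conv weights repacked into channels-last
        # layout; the original contiguous weight tensors were freed above.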
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 15, 15), (14400, 225, 15, 1))
buf5 = empty_strided_cuda((4, 64, 15, 15), (14400, 1, 960, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf4, primals_2, buf5, 256, 225, grid=grid(256, 225), stream=stream0)
del buf4
del primals_2
buf6 = empty_strided_cuda((4, 64, 7, 7), (3136, 1, 448, 64), torch.float32)
buf7 = empty_strided_cuda((4, 64, 7, 7), (3136, 1, 448, 64), torch.int8)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
triton_poi_fused_max_pool2d_with_indices_relu_5.run(buf8, buf5, buf7, 12544, grid=grid(12544), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, buf0, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 192, 7, 7), (9408, 1, 1344, 192))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf10, primals_5, 37632, grid=grid(37632), stream=stream0)
del primals_5
buf11 = empty_strided_cuda((4, 192, 3, 3), (1728, 1, 576, 192), torch.float32)
buf12 = empty_strided_cuda((4, 192, 3, 3), (1728, 1, 576, 192), torch.int8)
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [max_pool2d_1, x_3], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
triton_poi_fused_max_pool2d_with_indices_relu_7.run(buf13, buf10, buf12, 6912, grid=grid(6912), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 384, 3, 3), (3456, 1, 1152, 384))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf15, primals_7, 13824, grid=grid(13824), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 3, 3), (2304, 1, 768, 256))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf17, primals_9, 9216, grid=grid(9216), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 3, 3), (2304, 1, 768, 256))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf19, primals_11, 9216, grid=grid(9216), stream=stream0)
del primals_11
buf20 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1024, 1024), torch.float32)
buf21 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 256, 256), torch.int8)
buf22 = reinterpret_tensor(buf20, (4, 256, 1, 1), (256, 1, 256, 256), 0); del buf20 # reuse
# Topologically Sorted Source Nodes: [max_pool2d_2, x_7], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
triton_poi_fused_max_pool2d_with_indices_relu_10.run(buf22, buf19, buf21, 1024, grid=grid(1024), stream=stream0)
buf23 = empty_strided_cuda((4, 9216), (9216, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
triton_poi_fused_relu_11.run(buf22, buf23, 36864, grid=grid(36864), stream=stream0)
buf24 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf23, reinterpret_tensor(primals_12, (9216, 4096), (1, 9216), 0), out=buf24)
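        # reinterpret_tensor views the (4096, 9216) weight as its transpose
        # by swapping strides (no copy), so extern_kernels.mm computes
        # x @ W.T as nn.Linear does.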
buf25 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
buf29 = empty_strided_cuda((4, 4096), (4096, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_12.run(buf24, primals_13, buf25, buf29, 16384, grid=grid(16384), stream=stream0)
del primals_13
buf26 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf25, reinterpret_tensor(primals_14, (4096, 4096), (1, 4096), 0), out=buf26)
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.relu]
triton_poi_fused_relu_13.run(buf27, primals_15, 16384, grid=grid(16384), stream=stream0)
del primals_15
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_17, buf27, reinterpret_tensor(primals_16, (4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf28)
del primals_17
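        # Besides the logits (buf28), the return list carries what the AOT
        # forward saves for backward: inputs, repacked weights, intermediate
        # activations, pooling indices, and the ReLU mask buf29 (an
        # interpretation of the usual Inductor calling convention).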
return (buf28, primals_1, primals_3, buf0, buf1, buf2, buf3, buf5, buf7, buf8, buf10, buf12, buf13, buf15, buf17, buf19, buf21, buf22, buf23, buf25, buf27, primals_16, primals_14, buf29, primals_12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 1, 11, 11), (121, 121, 11, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((192, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((384, 192, 3, 3), (1728, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 384, 3, 3), (3456, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4096, 9216), (9216, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4096, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AlexNet(nn.Module):
"""
    Our first more "successful" network, a slightly modified AlexNet
    that accepts images with 1 channel (i.e. grayscale).
    Attributes
    ----------
    c1, c3, c5, c6, c7: torch.nn.modules.conv.Conv2d
        Apply a 2D convolution over an input signal composed
        of several input planes
    s2, s4, s8: torch.nn.modules.pooling.MaxPool2d
        Apply a 2D max pooling over an input signal composed
        of several input planes
    avgpool: torch.nn.modules.pooling.AdaptiveAvgPool2d
        Applies a 2D adaptive average pooling over an input signal composed
        of several input planes
    d9, d11: torch.nn.modules.dropout.Dropout
        Randomly zero some elements of the input tensor during training
        using a Bernoulli distribution
    f10, f12: torch.nn.modules.linear.Linear
        Apply a linear transformation to the incoming data
    output: torch.nn.modules.linear.Linear
        Applies a linear transformation with the required output channels
    act: torch.nn.modules.activation.ReLU
        Applies a rectified linear unit function
Methods
-------
forward(x)
Forward pass using the layers and activations of the class
"""
def __init__(self):
super(AlexNet, self).__init__()
self.c1 = nn.Conv2d(1, 64, kernel_size=11, stride=4, padding=2)
self.s2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.c3 = nn.Conv2d(64, 192, kernel_size=5, padding=2)
self.s4 = nn.MaxPool2d(kernel_size=3, stride=2)
self.c5 = nn.Conv2d(192, 384, kernel_size=3, padding=1)
self.c6 = nn.Conv2d(384, 256, kernel_size=3, padding=1)
self.c7 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.s8 = nn.MaxPool2d(kernel_size=3, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.d9 = nn.Dropout()
self.f10 = nn.Linear(256 * 6 * 6, 4096)
self.d11 = nn.Dropout()
self.f12 = nn.Linear(4096, 4096)
self.output = nn.Linear(4096, 4)
self.act = nn.ReLU()
def forward(self, x):
"""
Forward pass using the layers and activations of the class.
Parameters
----------
        x: tensor
            input image tensor of shape (N, 1, H, W); it is flattened
            inside this method before the fully connected block
Returns
-------
        tensor
            output logits of shape (N, 4)
"""
x = self.act(self.c1(x))
x = self.act(self.s2(x))
x = self.act(self.c3(x))
x = self.act(self.s4(x))
x = self.act(self.c5(x))
x = self.act(self.c6(x))
x = self.act(self.c7(x))
x = self.act(self.s8(x))
x = self.avgpool(x)
x = x.view(-1, x.size(1) * x.size(2) * x.size(3))
x = self.act(self.d9(x))
x = self.act(self.f10(x))
x = self.act(self.d11(x))
x = self.act(self.f12(x))
return self.output(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
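# A minimal usage sketch (not part of the traced graph): one eager forward
# pass on the sample input from get_inputs(); with a batch of four 64x64
# grayscale images the output logits should have shape [4, 4].
if __name__ == "__main__":
    model = AlexNet()
    out = model(*get_inputs())
    print(out.shape)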
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
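# The kernels below restate the fused kernels compiled above in a
# decorator-free form: grid sizing and autotuning metadata live in the
# @triton_heuristics wrappers of the compiled listing, while the bodies here
# are essentially the same.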
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 192 * x2 + 1728 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = yindex // 384
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 384 * x2 + 3456 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 225
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 225 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 64 * x2 + 14400 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 7
x2 = xindex // 448 % 7
x3 = xindex // 3136
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 1920 * x2 + 14400 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 1920 * x2 + 14400 * x3),
xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 128 * x1 + 1920 * x2 + 14400 * x3),
xmask)
tmp5 = tl.load(in_ptr0 + (960 + x0 + 128 * x1 + 1920 * x2 + 14400 * x3),
xmask)
tmp7 = tl.load(in_ptr0 + (1024 + x0 + 128 * x1 + 1920 * x2 + 14400 * x3
), xmask)
tmp9 = tl.load(in_ptr0 + (1088 + x0 + 128 * x1 + 1920 * x2 + 14400 * x3
), xmask)
tmp11 = tl.load(in_ptr0 + (1920 + x0 + 128 * x1 + 1920 * x2 + 14400 *
x3), xmask)
tmp13 = tl.load(in_ptr0 + (1984 + x0 + 128 * x1 + 1920 * x2 + 14400 *
x3), xmask)
tmp15 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 1920 * x2 + 14400 *
x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tl.full([1], 0, tl.int32)
tmp43 = triton_helpers.maximum(tmp42, tmp16)
tl.store(out_ptr0 + x4, tmp41, xmask)
tl.store(in_out_ptr0 + x4, tmp43, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 37632
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_7(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 192
x1 = xindex // 192 % 3
x2 = xindex // 576 % 3
x3 = xindex // 1728
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 384 * x1 + 2688 * x2 + 9408 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (192 + x0 + 384 * x1 + 2688 * x2 + 9408 * x3),
xmask)
tmp3 = tl.load(in_ptr0 + (384 + x0 + 384 * x1 + 2688 * x2 + 9408 * x3),
xmask)
tmp5 = tl.load(in_ptr0 + (1344 + x0 + 384 * x1 + 2688 * x2 + 9408 * x3),
xmask)
tmp7 = tl.load(in_ptr0 + (1536 + x0 + 384 * x1 + 2688 * x2 + 9408 * x3),
xmask)
tmp9 = tl.load(in_ptr0 + (1728 + x0 + 384 * x1 + 2688 * x2 + 9408 * x3),
xmask)
tmp11 = tl.load(in_ptr0 + (2688 + x0 + 384 * x1 + 2688 * x2 + 9408 * x3
), xmask)
tmp13 = tl.load(in_ptr0 + (2880 + x0 + 384 * x1 + 2688 * x2 + 9408 * x3
), xmask)
tmp15 = tl.load(in_ptr0 + (3072 + x0 + 384 * x1 + 2688 * x2 + 9408 * x3
), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tl.full([1], 0, tl.int32)
tmp43 = triton_helpers.maximum(tmp42, tmp16)
tl.store(out_ptr0 + x4, tmp41, xmask)
tl.store(in_out_ptr0 + x4, tmp43, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 13824
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_10(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 2304 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 2304 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (512 + x0 + 2304 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (768 + x0 + 2304 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (1024 + x0 + 2304 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (1280 + x0 + 2304 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (1536 + x0 + 2304 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (1792 + x0 + 2304 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (2048 + x0 + 2304 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tmp42 = tl.full([1], 0, tl.int32)
tmp43 = triton_helpers.maximum(tmp42, tmp16)
tl.store(out_ptr0 + x2, tmp41, xmask)
tl.store(in_out_ptr0 + x2, tmp43, xmask)
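# relu_11 below realizes AdaptiveAvgPool2d((6, 6)) on a 1x1 spatial input:
# each pooled channel value (indexed by x2 // 36) is replicated across a 6x6
# window, divided by the window count (1.0 here), and passed through ReLU.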
@triton.jit
def triton_poi_fused_relu_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 9216
x2 = xindex
tmp0 = x0 // 6 % 6 // 6
tmp1 = 1 + x0 // 6 % 6 // 6
tmp2 = tmp0 < tmp1
tmp3 = x0 % 6 // 6
tmp4 = 1 + x0 % 6 // 6
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + x2 // 36, tmp6, eviction_policy='evict_last',
other=0.0)
tmp8 = 1.0
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp6, tmp8, tmp9)
tmp11 = tmp7 / tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp13, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(out_ptr0 + x2, tmp5, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
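# call() stitches the full forward pass together: the fused Triton kernels
# above handle weight repacking, ReLU, and pooling, while the convolutions
# and classifier GEMMs go through extern_kernels (convolution / mm / addmm).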
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 11, 11), (121, 121, 11, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (192, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (192,), (1,))
assert_size_stride(primals_6, (384, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_7, (384,), (1,))
assert_size_stride(primals_8, (256, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (4096, 9216), (9216, 1))
assert_size_stride(primals_13, (4096,), (1,))
assert_size_stride(primals_14, (4096, 4096), (4096, 1))
assert_size_stride(primals_15, (4096,), (1,))
assert_size_stride(primals_16, (4, 4096), (4096, 1))
assert_size_stride(primals_17, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((192, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12288, 25)](primals_4, buf0, 12288, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((384, 192, 3, 3), (1728, 1, 576, 192),
torch.float32)
triton_poi_fused_1[grid(73728, 9)](primals_6, buf1, 73728, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((256, 384, 3, 3), (3456, 1, 1152, 384),
torch.float32)
triton_poi_fused_2[grid(98304, 9)](primals_8, buf2, 98304, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(65536, 9)](primals_10, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf4 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 15, 15), (14400, 225, 15, 1))
buf5 = empty_strided_cuda((4, 64, 15, 15), (14400, 1, 960, 64),
torch.float32)
triton_poi_fused_convolution_relu_4[grid(256, 225)](buf4, primals_2,
buf5, 256, 225, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf4
del primals_2
buf6 = empty_strided_cuda((4, 64, 7, 7), (3136, 1, 448, 64), torch.
float32)
buf7 = empty_strided_cuda((4, 64, 7, 7), (3136, 1, 448, 64), torch.int8
)
buf8 = buf6
del buf6
triton_poi_fused_max_pool2d_with_indices_relu_5[grid(12544)](buf8,
buf5, buf7, 12544, XBLOCK=128, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf8, buf0, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 192, 7, 7), (9408, 1, 1344, 192))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_6[grid(37632)](buf10, primals_5,
37632, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf11 = empty_strided_cuda((4, 192, 3, 3), (1728, 1, 576, 192),
torch.float32)
buf12 = empty_strided_cuda((4, 192, 3, 3), (1728, 1, 576, 192),
torch.int8)
buf13 = buf11
del buf11
triton_poi_fused_max_pool2d_with_indices_relu_7[grid(6912)](buf13,
buf10, buf12, 6912, XBLOCK=256, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf13, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 384, 3, 3), (3456, 1, 1152, 384))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_8[grid(13824)](buf15, primals_7,
13824, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 3, 3), (2304, 1, 768, 256))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_9[grid(9216)](buf17, primals_9,
9216, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf18 = extern_kernels.convolution(buf17, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 3, 3), (2304, 1, 768, 256))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_9[grid(9216)](buf19, primals_11,
9216, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf20 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1024, 1024),
torch.float32)
buf21 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 256, 256),
torch.int8)
buf22 = reinterpret_tensor(buf20, (4, 256, 1, 1), (256, 1, 256, 256), 0
)
del buf20
triton_poi_fused_max_pool2d_with_indices_relu_10[grid(1024)](buf22,
buf19, buf21, 1024, XBLOCK=256, num_warps=4, num_stages=1)
buf23 = empty_strided_cuda((4, 9216), (9216, 1), torch.float32)
triton_poi_fused_relu_11[grid(36864)](buf22, buf23, 36864, XBLOCK=
256, num_warps=4, num_stages=1)
buf24 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf23, reinterpret_tensor(primals_12, (9216, 4096
), (1, 9216), 0), out=buf24)
buf25 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
buf29 = empty_strided_cuda((4, 4096), (4096, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_12[grid(16384)](buf24,
primals_13, buf25, buf29, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_13
buf26 = buf24
del buf24
extern_kernels.mm(buf25, reinterpret_tensor(primals_14, (4096, 4096
), (1, 4096), 0), out=buf26)
buf27 = buf26
del buf26
triton_poi_fused_relu_13[grid(16384)](buf27, primals_15, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_17, buf27, reinterpret_tensor(
primals_16, (4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf28)
del primals_17
return (buf28, primals_1, primals_3, buf0, buf1, buf2, buf3, buf5, buf7,
buf8, buf10, buf12, buf13, buf15, buf17, buf19, buf21, buf22, buf23,
buf25, buf27, primals_16, primals_14, buf29, primals_12)
class AlexNetNew(nn.Module):
"""
    Our first more "successful" network, a slightly modified AlexNet
    that accepts images with 1 channel (i.e. grayscale).
Attributes
----------
c: torch.nn.modules.conv.Conv2d
Applies a 2D convolution over an input signal composed
of several input planes
s: torch.nn.modules.pooling.MaxPool2d
Applies a 2D max pooling over an input signal composed
of several input planes
avgpool: torch.nn.modules.pooling.AdaptiveAvgPool2d
Applies a 2D adaptive average pooling over an input signal composed
of several input planes
d: torch.nn.modules.dropout.Dropout
Randomly zeroes some elements of the input tensor during training
using a Bernoulli distribution
f: torch.nn.modules.linear.Linear
Applies a linear transformation to the incoming data
output: torch.nn.modules.linear.Linear
Applies a linear transformation with the required output channels
act: torch.nn.modules.activation.ReLU
Applies a rectified linear unit function
Methods
-------
forward(x)
Forward pass using the layers and activations of the class
"""
def __init__(self):
super(AlexNetNew, self).__init__()
self.c1 = nn.Conv2d(1, 64, kernel_size=11, stride=4, padding=2)
self.s2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.c3 = nn.Conv2d(64, 192, kernel_size=5, padding=2)
self.s4 = nn.MaxPool2d(kernel_size=3, stride=2)
self.c5 = nn.Conv2d(192, 384, kernel_size=3, padding=1)
self.c6 = nn.Conv2d(384, 256, kernel_size=3, padding=1)
self.c7 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.s8 = nn.MaxPool2d(kernel_size=3, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.d9 = nn.Dropout()
self.f10 = nn.Linear(256 * 6 * 6, 4096)
self.d11 = nn.Dropout()
self.f12 = nn.Linear(4096, 4096)
self.output = nn.Linear(4096, 4)
self.act = nn.ReLU()
def forward(self, input_0):
primals_1 = self.c1.weight
primals_2 = self.c1.bias
primals_4 = self.c3.weight
primals_5 = self.c3.bias
primals_6 = self.c5.weight
primals_7 = self.c5.bias
primals_8 = self.c6.weight
primals_9 = self.c6.bias
primals_10 = self.c7.weight
primals_11 = self.c7.bias
primals_12 = self.f10.weight
primals_13 = self.f10.bias
primals_14 = self.f12.weight
primals_15 = self.f12.bias
primals_16 = self.output.weight
primals_17 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
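# Usage sketch (an assumption: a CUDA device is available, since call()
# pins cuda:0 and allocates CUDA buffers):
# model = AlexNetNew().cuda()
# logits = model(torch.rand(4, 1, 64, 64, device='cuda'))  # -> (4, 4) scores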
| jamessoni/x-ray-ML-classification | AlexNet | false | 3,854 | [
"MIT"
] | 0 | 6934f37631d367cdbe813fa6a2cbdc673c64c503 | https://github.com/jamessoni/x-ray-ML-classification/tree/6934f37631d367cdbe813fa6a2cbdc673c64c503 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    Our first more "successful" network, a slightly modified AlexNet
    that accepts images with 1 channel (i.e. grayscale).
Attributes
----------
c: torch.nn.modules.conv.Conv2d
Applies a 2D convolution over an input signal composed
of several input planes
s: torch.nn.modules.pooling.MaxPool2d
Applies a 2D max pooling over an input signal composed
of several input planes
avgpool: torch.nn.modules.pooling.AdaptiveAvgPool2d
Applies a 2D adaptive average pooling over an input signal composed
of several input planes
d: torch.nn.modules.dropout.Dropout
Randomly zeroes some elements of the input tensor during training
using a Bernoulli distribution
f: torch.nn.modules.linear.Linear
Applies a linear transformation to the incoming data
output: torch.nn.modules.linear.Linear
Applies a linear transformation with the required output channels
act: torch.nn.modules.activation.ReLU
Applies a rectified linear unit function
Methods
-------
forward(x)
Forward pass using the layers and activations of the class
"""
def __init__(self):
super().__init__()
self.c1 = nn.Conv2d(1, 64, kernel_size=11, stride=4, padding=2)
self.s2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.c3 = nn.Conv2d(64, 192, kernel_size=5, padding=2)
self.s4 = nn.MaxPool2d(kernel_size=3, stride=2)
self.c5 = nn.Conv2d(192, 384, kernel_size=3, padding=1)
self.c6 = nn.Conv2d(384, 256, kernel_size=3, padding=1)
self.c7 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.s8 = nn.MaxPool2d(kernel_size=3, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.d9 = nn.Dropout()
self.f10 = nn.Linear(256 * 6 * 6, 4096)
self.d11 = nn.Dropout()
self.f12 = nn.Linear(4096, 4096)
self.output = nn.Linear(4096, 4)
self.act = nn.ReLU()
def forward(self, x):
"""
Forward pass using the layers and activations of the class.
Parameters
----------
        x: tensor
            input image tensor of shape (N, 1, H, W); it is flattened
            internally before the fully connected block of the network
Returns
-------
tensor
Forwarded tensor
"""
x = self.act(self.c1(x))
x = self.act(self.s2(x))
x = self.act(self.c3(x))
x = self.act(self.s4(x))
x = self.act(self.c5(x))
x = self.act(self.c6(x))
x = self.act(self.c7(x))
x = self.act(self.s8(x))
x = self.avgpool(x)
x = x.view(-1, x.size(1) * x.size(2) * x.size(3))
x = self.act(self.d9(x))
x = self.act(self.f10(x))
x = self.act(self.d11(x))
x = self.act(self.f12(x))
return self.output(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
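# Quick eager sanity check (a sketch; the reference Model runs on CPU):
# model = Model()
# out = model(*get_inputs())
# assert out.shape == (4, 4)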
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/p7/cp7xzeyl6japtnkojqx5iupjksot3nuocbambsy2o3yflsevkl5j.py
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/s2/cs2rk3o3kmhydx4oijp6rsdb5atcrq5axy4adadrpl7gkt7scies.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
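# Note: the softmax over the 4-wide attention rows is lowered as two pointwise
# kernels rather than a reduction: _softmax_1 computes exp(x - rowmax) and
# _softmax_2 divides by the row sum.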
# kernel path: runs/run_shard_7/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jq/cjqs74o7upz4nudmv2dbjbmggtmuj2mktvc7gygosrcj2d2xxghd.py
# Topologically Sorted Source Nodes: [result_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# result_3 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %view_13], -1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x1 + (16*((-4) + x0))), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/t2/ct2fj4cxakdevxwd5upea4iyfznuislybj5p4wd6jgtx5ayzurnk.py
# Topologically Sorted Source Nodes: [result_5, result_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# result_5 => add
# result_6 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_15, %primals_2), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6g/c6gddvf7hivp3xdh4pyazhygzjkdnh5sxyn6itmcverzcfqnfwwt.py
# Topologically Sorted Source Nodes: [result_5, result_6], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# result_5 => add
# result_6 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_15, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_8), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf2, buf3, 4, 16, grid=grid(4, 16), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf0, buf4, 4, 16, grid=grid(4, 16), stream=stream0)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, buf8, 4, 16, grid=grid(4, 16), stream=stream0)
buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [result_3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(primals_2, buf9, buf10, 128, grid=grid(128), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [result_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [result_5, result_6], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(buf11, primals_2, buf12, buf13, 16, grid=grid(16), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [result_5, result_6], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(buf11, primals_2, buf12, buf13, primals_8, primals_9, buf14, 64, grid=grid(64), stream=stream0)
del buf12
del buf13
del primals_9
return (buf14, buf7, primals_2, primals_8, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), buf11, primals_6, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch as t
class Linear(nn.Module):
"""
Linear Module
"""
def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
"""
:param in_dim: dimension of input
:param out_dim: dimension of output
:param bias: boolean. if True, bias is included.
        :param w_init: str. nonlinearity name used for the Xavier initialization gain.
"""
super(Linear, self).__init__()
self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
return self.linear_layer(x)
class MultiheadAttention(nn.Module):
"""
Multihead attention mechanism (dot attention)
"""
def __init__(self, num_hidden_k):
"""
:param num_hidden_k: dimension of hidden
"""
super(MultiheadAttention, self).__init__()
self.num_hidden_k = num_hidden_k
self.attn_dropout = nn.Dropout(p=0.1)
def forward(self, key, value, query, mask=None, query_mask=None):
attn = t.bmm(query, key.transpose(1, 2))
attn = attn / math.sqrt(self.num_hidden_k)
if mask is not None:
attn = attn.masked_fill(mask, -2 ** 32 + 1)
attn = t.softmax(attn, dim=-1)
else:
attn = t.softmax(attn, dim=-1)
if query_mask is not None:
attn = attn * query_mask
result = t.bmm(attn, value)
return result, attn
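# Shape sketch for the module above (sizes are illustrative): with inputs of
# shape (h*B, T, d_k), `attn` is (h*B, T, T) after the scaled softmax and
# `result` is (h*B, T, d_k) from the final bmm with `value`.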
class Attention(nn.Module):
"""
Attention Network
"""
def __init__(self, num_hidden, h=4):
"""
:param num_hidden: dimension of hidden
:param h: num of heads
"""
super(Attention, self).__init__()
self.num_hidden = num_hidden
self.num_hidden_per_attn = num_hidden // h
self.h = h
self.key = Linear(num_hidden, num_hidden, bias=False)
self.value = Linear(num_hidden, num_hidden, bias=False)
self.query = Linear(num_hidden, num_hidden, bias=False)
self.multihead = MultiheadAttention(self.num_hidden_per_attn)
self.residual_dropout = nn.Dropout(p=0.1)
self.final_linear = Linear(num_hidden * 2, num_hidden)
self.layer_norm_1 = nn.LayerNorm(num_hidden)
def forward(self, memory, decoder_input, mask=None, query_mask=None):
batch_size = memory.size(0)
seq_k = memory.size(1)
seq_q = decoder_input.size(1)
if query_mask is not None:
query_mask = query_mask.unsqueeze(-1).repeat(1, 1, seq_k)
query_mask = query_mask.repeat(self.h, 1, 1)
if mask is not None:
mask = mask.repeat(self.h, 1, 1)
key = self.key(memory).view(batch_size, seq_k, self.h, self.
num_hidden_per_attn)
value = self.value(memory).view(batch_size, seq_k, self.h, self.
num_hidden_per_attn)
query = self.query(decoder_input).view(batch_size, seq_q, self.h,
self.num_hidden_per_attn)
key = key.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self.
num_hidden_per_attn)
value = value.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self
.num_hidden_per_attn)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, seq_q, self
.num_hidden_per_attn)
result, attns = self.multihead(key, value, query, mask=mask,
query_mask=query_mask)
result = result.view(self.h, batch_size, seq_q, self.
num_hidden_per_attn)
result = result.permute(1, 2, 0, 3).contiguous().view(batch_size,
seq_q, -1)
result = t.cat([decoder_input, result], dim=-1)
result = self.final_linear(result)
result = result + decoder_input
result = self.layer_norm_1(result)
return result, attns
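# With the test configuration below (num_hidden=4, h=4, batch 4, seq 4):
# num_hidden_per_attn = 1, so key/value/query are reshaped to (16, 4, 1),
# per-head outputs are regrouped to (4, 4, 4), and the concat with
# decoder_input yields the (4, 4, 8) input to final_linear.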
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_hidden': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch as t
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(4, 16)](buf2, buf3, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused_clone_0[grid(4, 16)](buf0, buf4, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(4, 16)](buf1, buf8, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1,
0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_3[grid(128)](primals_2, buf9, buf10, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 8),
(8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16)](buf11, primals_2,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(64)](buf11, primals_2,
buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_9
return buf14, buf7, primals_2, primals_8, reinterpret_tensor(primals_1,
(16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0
), buf11, primals_6, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0)
class Linear(nn.Module):
"""
Linear Module
"""
def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
"""
:param in_dim: dimension of input
:param out_dim: dimension of output
:param bias: boolean. if True, bias is included.
        :param w_init: str. nonlinearity name used for the Xavier initialization gain.
"""
super(Linear, self).__init__()
self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
return self.linear_layer(x)
class MultiheadAttention(nn.Module):
"""
Multihead attention mechanism (dot attention)
"""
def __init__(self, num_hidden_k):
"""
:param num_hidden_k: dimension of hidden
"""
super(MultiheadAttention, self).__init__()
self.num_hidden_k = num_hidden_k
self.attn_dropout = nn.Dropout(p=0.1)
def forward(self, key, value, query, mask=None, query_mask=None):
attn = t.bmm(query, key.transpose(1, 2))
attn = attn / math.sqrt(self.num_hidden_k)
if mask is not None:
attn = attn.masked_fill(mask, -2 ** 32 + 1)
attn = t.softmax(attn, dim=-1)
else:
attn = t.softmax(attn, dim=-1)
if query_mask is not None:
attn = attn * query_mask
result = t.bmm(attn, value)
return result, attn
class AttentionNew(nn.Module):
"""
Attention Network
"""
def __init__(self, num_hidden, h=4):
"""
:param num_hidden: dimension of hidden
:param h: num of heads
"""
super(AttentionNew, self).__init__()
self.num_hidden = num_hidden
self.num_hidden_per_attn = num_hidden // h
self.h = h
self.key = Linear(num_hidden, num_hidden, bias=False)
self.value = Linear(num_hidden, num_hidden, bias=False)
self.query = Linear(num_hidden, num_hidden, bias=False)
self.multihead = MultiheadAttention(self.num_hidden_per_attn)
self.residual_dropout = nn.Dropout(p=0.1)
self.final_linear = Linear(num_hidden * 2, num_hidden)
self.layer_norm_1 = nn.LayerNorm(num_hidden)
def forward(self, input_0, input_1):
primals_3 = self.key.linear_layer.weight
primals_4 = self.value.linear_layer.weight
primals_5 = self.query.linear_layer.weight
primals_6 = self.final_linear.linear_layer.weight
primals_7 = self.final_linear.linear_layer.bias
primals_8 = self.layer_norm_1.weight
primals_9 = self.layer_norm_1.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
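# Usage sketch (an assumption: a CUDA device is available, since call()
# targets device 0):
# layer = AttentionNew(num_hidden=4).cuda()
# out, attn = layer(torch.rand(4, 4, 4, device='cuda'),
#                   torch.rand(4, 4, 4, device='cuda'))  # out: (4, 4, 4)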
| kongziyue1234/mooc | Attention | false | 3,855 | [
"MIT"
] | 0 | 3b0c822dd55c1066cbc829137e6c424dcda5067e | https://github.com/kongziyue1234/mooc/tree/3b0c822dd55c1066cbc829137e6c424dcda5067e | import math
import torch
import torch.nn as nn
import torch as t
class Linear(nn.Module):
"""
Linear Module
"""
def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
"""
:param in_dim: dimension of input
:param out_dim: dimension of output
:param bias: boolean. if True, bias is included.
        :param w_init: str. nonlinearity whose gain is used for Xavier initialization of the weights.
"""
super().__init__()
self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
return self.linear_layer(x)
class MultiheadAttention(nn.Module):
"""
Multihead attention mechanism (dot attention)
"""
def __init__(self, num_hidden_k):
"""
:param num_hidden_k: dimension of hidden
"""
super().__init__()
self.num_hidden_k = num_hidden_k
self.attn_dropout = nn.Dropout(p=0.1)
def forward(self, key, value, query, mask=None, query_mask=None):
attn = t.bmm(query, key.transpose(1, 2))
attn = attn / math.sqrt(self.num_hidden_k)
if mask is not None:
attn = attn.masked_fill(mask, -2 ** 32 + 1)
attn = t.softmax(attn, dim=-1)
else:
attn = t.softmax(attn, dim=-1)
if query_mask is not None:
attn = attn * query_mask
result = t.bmm(attn, value)
return result, attn
class Model(nn.Module):
"""
Attention Network
"""
def __init__(self, num_hidden, h=4):
"""
:param num_hidden: dimension of hidden
:param h: num of heads
"""
super().__init__()
self.num_hidden = num_hidden
self.num_hidden_per_attn = num_hidden // h
self.h = h
self.key = Linear(num_hidden, num_hidden, bias=False)
self.value = Linear(num_hidden, num_hidden, bias=False)
self.query = Linear(num_hidden, num_hidden, bias=False)
self.multihead = MultiheadAttention(self.num_hidden_per_attn)
self.residual_dropout = nn.Dropout(p=0.1)
self.final_linear = Linear(num_hidden * 2, num_hidden)
self.layer_norm_1 = nn.LayerNorm(num_hidden)
def forward(self, memory, decoder_input, mask=None, query_mask=None):
batch_size = memory.size(0)
seq_k = memory.size(1)
seq_q = decoder_input.size(1)
if query_mask is not None:
query_mask = query_mask.unsqueeze(-1).repeat(1, 1, seq_k)
query_mask = query_mask.repeat(self.h, 1, 1)
if mask is not None:
mask = mask.repeat(self.h, 1, 1)
key = self.key(memory).view(batch_size, seq_k, self.h, self.
num_hidden_per_attn)
value = self.value(memory).view(batch_size, seq_k, self.h, self.
num_hidden_per_attn)
query = self.query(decoder_input).view(batch_size, seq_q, self.h,
self.num_hidden_per_attn)
key = key.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self.
num_hidden_per_attn)
value = value.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self
.num_hidden_per_attn)
query = query.permute(2, 0, 1, 3).contiguous().view(-1, seq_q, self
.num_hidden_per_attn)
result, attns = self.multihead(key, value, query, mask=mask,
query_mask=query_mask)
result = result.view(self.h, batch_size, seq_q, self.
num_hidden_per_attn)
result = result.permute(1, 2, 0, 3).contiguous().view(batch_size,
seq_q, -1)
result = t.cat([decoder_input, result], dim=-1)
result = self.final_linear(result)
result = result + decoder_input
result = self.layer_norm_1(result)
return result, attns
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
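# --- Worked example (added, hedged): eager forward pass shape check. ---
# Uses only the Model/helpers defined above; values are illustrative.
if __name__ == '__main__':
    m = Model(num_hidden=4, h=4)
    memory = torch.rand(4, 4, 4)         # (batch, seq_k, num_hidden)
    decoder_input = torch.rand(4, 4, 4)  # (batch, seq_q, num_hidden)
    out, attns = m(memory, decoder_input)
    assert out.shape == (4, 4, 4)        # (batch, seq_q, num_hidden)
    assert attns.shape == (4 * 4, 4, 4)  # (h * batch, seq_q, seq_k)
    # each attention row is a distribution over keys
    assert torch.allclose(attns.sum(-1), torch.ones(16, 4), atol=1e-5)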
|
OrientedIOUloss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4s/c4sw3qsbtwmtvpwhsllvmuanm47rmd4zggvm25f6ifqltgp5wpg5.py
# Topologically Sorted Source Nodes: [area_p, area_g, add_2, truediv_2, add, truediv_3, add_1, br, truediv, sub, truediv_1, sub_1, tl, sub_2, prod_3, lt, type_1, en, area_i, area_u, add_3, iou, pow_1, sub_4, loss_alpha, add_4, loss_beta, loss], Original ATen: [aten.prod, aten.add, aten.div, aten.minimum, aten.sub, aten.maximum, aten.lt, aten._to_copy, aten.mul, aten.pow, aten.rsub, aten.mse_loss]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# area_g => prod_1
# area_i => mul
# area_p => prod
# area_u => sub_3
# br => minimum
# en => prod_2
# iou => div_4
# loss => add_5
# loss_alpha => pow_1, sub_4
# loss_beta => pow_2, sub_5
# lt => lt
# pow_1 => pow_3
# prod_3 => prod_3
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_4 => sub_6
# tl => maximum
# truediv => div
# truediv_1 => div_1
# truediv_2 => div_2
# truediv_3 => div_3
# type_1 => convert_element_type
# Graph fragment:
# %prod : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%slice_18, 1), kwargs = {})
# %prod_1 : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%slice_20, 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%prod, %prod_1), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_12, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_10, %div_2), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_16, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_14, %div_3), kwargs = {})
# %minimum : [num_users=2] = call_function[target=torch.ops.aten.minimum.default](args = (%add, %add_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_4, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_2, %div), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_8, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_6, %div_1), kwargs = {})
# %maximum : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%sub, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %maximum), kwargs = {})
# %prod_3 : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%sub_2, 1), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Tensor](args = (%maximum, %minimum), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.float32), kwargs = {})
# %prod_2 : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%convert_element_type, 1), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%prod_3, %prod_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %mul), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_3, 1e-16), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_3), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div_4, 2), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %pow_3), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select, %select_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_4, 2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_6, %pow_1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_2, %select_3), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_5, 2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %pow_2), kwargs = {})
triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mse_loss_mul_pow_prod_rsub_sub_0 = async_compile.triton('triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mse_loss_mul_pow_prod_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mse_loss_mul_pow_prod_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mse_loss_mul_pow_prod_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (6*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (2 + (6*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (6*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + (6*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (6*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (6*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (1 + (6*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (6*x0)), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr0 + (4 + (6*x0)), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr1 + (4 + (6*x0)), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr0 + (5 + (6*x0)), xmask, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr1 + (5 + (6*x0)), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp6 * tmp2
tmp8 = tmp5 + tmp7
tmp9 = triton_helpers.minimum(tmp4, tmp8)
tmp10 = tmp0 - tmp3
tmp11 = tmp5 - tmp7
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tmp9 - tmp12
tmp16 = tmp15 * tmp2
tmp17 = tmp14 + tmp16
tmp20 = tmp19 * tmp2
tmp21 = tmp18 + tmp20
tmp22 = triton_helpers.minimum(tmp17, tmp21)
tmp23 = tmp14 - tmp16
tmp24 = tmp18 - tmp20
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tmp22 - tmp25
tmp27 = tmp13 * tmp26
tmp28 = tmp12 < tmp9
tmp29 = tmp28.to(tl.float32)
tmp30 = tmp25 < tmp22
tmp31 = tmp30.to(tl.float32)
tmp32 = tmp29 * tmp31
tmp33 = tmp27 * tmp32
tmp34 = tmp1 * tmp15
tmp35 = tmp6 * tmp19
tmp36 = tmp34 + tmp35
tmp37 = tmp36 - tmp33
tmp38 = 1e-16
tmp39 = tmp37 + tmp38
tmp40 = tmp33 / tmp39
tmp41 = tmp40 * tmp40
tmp42 = 1.0
tmp43 = tmp42 - tmp41
tmp46 = tmp44 - tmp45
tmp47 = tmp46 * tmp46
tmp48 = tmp43 + tmp47
tmp51 = tmp49 - tmp50
tmp52 = tmp51 * tmp51
tmp53 = tmp48 + tmp52
tl.store(in_out_ptr0 + (x0), tmp53, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 6), (6, 1))
assert_size_stride(arg1_1, (4, 6), (6, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [area_p, area_g, add_2, truediv_2, add, truediv_3, add_1, br, truediv, sub, truediv_1, sub_1, tl, sub_2, prod_3, lt, type_1, en, area_i, area_u, add_3, iou, pow_1, sub_4, loss_alpha, add_4, loss_beta, loss], Original ATen: [aten.prod, aten.add, aten.div, aten.minimum, aten.sub, aten.maximum, aten.lt, aten._to_copy, aten.mul, aten.pow, aten.rsub, aten.mse_loss]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mse_loss_mul_pow_prod_rsub_sub_0.run(buf1, arg0_1, arg1_1, 4, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 6), (6, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 6), (6, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class OrientedIOUloss(nn.Module):
def __init__(self, reduction='none', loss_type='iou'):
super(OrientedIOUloss, self).__init__()
self.reduction = reduction
self.loss_type = loss_type
self.loss_mse = nn.MSELoss(reduction=reduction)
def forward(self, pred, target):
assert pred.shape[0] == target.shape[0]
pred = pred.view(-1, 6)
target = target.view(-1, 6)
tl = torch.max(pred[:, :2] - pred[:, 2:4] / 2, target[:, :2] -
target[:, 2:4] / 2)
br = torch.min(pred[:, :2] + pred[:, 2:4] / 2, target[:, :2] +
target[:, 2:4] / 2)
area_p = torch.prod(pred[:, 2:4], 1)
area_g = torch.prod(target[:, 2:4], 1)
en = (tl < br).type(tl.type()).prod(dim=1)
area_i = torch.prod(br - tl, 1) * en
area_u = area_p + area_g - area_i
iou = area_i / (area_u + 1e-16)
loss_alpha = self.loss_mse(pred[:, 4], target[:, 4])
loss_beta = self.loss_mse(pred[:, 5], target[:, 5])
if self.loss_type == 'iou':
loss = 1 - iou ** 2 + loss_alpha + loss_beta
elif self.loss_type == 'giou':
c_tl = torch.min(pred[:, :2] - pred[:, 2:4] / 2, target[:, :2] -
target[:, 2:4] / 2)
c_br = torch.max(pred[:, :2] + pred[:, 2:4] / 2, target[:, :2] +
target[:, 2:4] / 2)
area_c = torch.prod(c_br - c_tl, 1)
giou = iou - (area_c - area_u) / area_c.clamp(1e-16)
loss = 1 - giou.clamp(min=-1.0, max=1.0) + loss_alpha + loss_beta
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 6]), torch.rand([4, 6])]
def get_init_inputs():
return [[], {}]
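# --- Sanity check (added, hedged): identical boxes give zero loss. ---
# With pred == target the IoU is exactly 1 and both angle MSE terms vanish,
# so loss = 1 - 1**2 + 0 + 0 = 0 per box for loss_type='iou'. Illustrative.
if __name__ == '__main__':
    boxes = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.1, 0.2]])  # cx, cy, w, h, a, b
    loss = OrientedIOUloss()(boxes, boxes.clone())
    assert torch.allclose(loss, torch.zeros(1), atol=1e-6)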
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mse_loss_mul_pow_prod_rsub_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 6 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp45 = tl.load(in_ptr1 + (4 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp49 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr1 + (5 + 6 * x0), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp6 * tmp2
tmp8 = tmp5 + tmp7
tmp9 = triton_helpers.minimum(tmp4, tmp8)
tmp10 = tmp0 - tmp3
tmp11 = tmp5 - tmp7
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tmp9 - tmp12
tmp16 = tmp15 * tmp2
tmp17 = tmp14 + tmp16
tmp20 = tmp19 * tmp2
tmp21 = tmp18 + tmp20
tmp22 = triton_helpers.minimum(tmp17, tmp21)
tmp23 = tmp14 - tmp16
tmp24 = tmp18 - tmp20
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tmp22 - tmp25
tmp27 = tmp13 * tmp26
tmp28 = tmp12 < tmp9
tmp29 = tmp28.to(tl.float32)
tmp30 = tmp25 < tmp22
tmp31 = tmp30.to(tl.float32)
tmp32 = tmp29 * tmp31
tmp33 = tmp27 * tmp32
tmp34 = tmp1 * tmp15
tmp35 = tmp6 * tmp19
tmp36 = tmp34 + tmp35
tmp37 = tmp36 - tmp33
tmp38 = 1e-16
tmp39 = tmp37 + tmp38
tmp40 = tmp33 / tmp39
tmp41 = tmp40 * tmp40
tmp42 = 1.0
tmp43 = tmp42 - tmp41
tmp46 = tmp44 - tmp45
tmp47 = tmp46 * tmp46
tmp48 = tmp43 + tmp47
tmp51 = tmp49 - tmp50
tmp52 = tmp51 * tmp51
tmp53 = tmp48 + tmp52
tl.store(in_out_ptr0 + x0, tmp53, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 6), (6, 1))
assert_size_stride(arg1_1, (4, 6), (6, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mse_loss_mul_pow_prod_rsub_sub_0[
grid(4)](buf1, arg0_1, arg1_1, 4, XBLOCK=4, num_warps=1,
num_stages=1)
del arg0_1
del arg1_1
return buf1,
class OrientedIOUlossNew(nn.Module):
def __init__(self, reduction='none', loss_type='iou'):
super(OrientedIOUlossNew, self).__init__()
self.reduction = reduction
self.loss_type = loss_type
self.loss_mse = nn.MSELoss(reduction=reduction)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
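# --- Parity sketch (added, hedged): fused kernel vs. eager math. ---
# The reference below re-derives the same formula inline so the sketch is
# self-contained; it is illustrative, not part of the generated code, and
# needs a GPU because call() targets device 0.
def _check_oriented_iou_parity():
    pred = torch.rand(4, 6, device='cuda')
    target = torch.rand(4, 6, device='cuda')
    fused = OrientedIOUlossNew()(pred, target)
    tl_ = torch.max(pred[:, :2] - pred[:, 2:4] / 2, target[:, :2] - target[:, 2:4] / 2)
    br_ = torch.min(pred[:, :2] + pred[:, 2:4] / 2, target[:, :2] + target[:, 2:4] / 2)
    area_i = torch.prod(br_ - tl_, 1) * (tl_ < br_).float().prod(dim=1)
    area_u = torch.prod(pred[:, 2:4], 1) + torch.prod(target[:, 2:4], 1) - area_i
    iou = area_i / (area_u + 1e-16)
    ref = 1 - iou ** 2 + (pred[:, 4] - target[:, 4]) ** 2 + (pred[:, 5] - target[:, 5]) ** 2
    assert torch.allclose(fused, ref, atol=1e-5)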
| kuazhangxiaoai/YOLOX | OrientedIOUloss | false | 3,856 | [
"Apache-2.0"
] | 0 | 7aff49b25a8a80c4c33e941da416500eda72b1a2 | https://github.com/kuazhangxiaoai/YOLOX/tree/7aff49b25a8a80c4c33e941da416500eda72b1a2 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, reduction='none', loss_type='iou'):
super().__init__()
self.reduction = reduction
self.loss_type = loss_type
self.loss_mse = nn.MSELoss(reduction=reduction)
def forward(self, pred, target):
assert pred.shape[0] == target.shape[0]
pred = pred.view(-1, 6)
target = target.view(-1, 6)
tl = torch.max(pred[:, :2] - pred[:, 2:4] / 2, target[:, :2] -
target[:, 2:4] / 2)
br = torch.min(pred[:, :2] + pred[:, 2:4] / 2, target[:, :2] +
target[:, 2:4] / 2)
area_p = torch.prod(pred[:, 2:4], 1)
area_g = torch.prod(target[:, 2:4], 1)
en = (tl < br).type(tl.type()).prod(dim=1)
area_i = torch.prod(br - tl, 1) * en
area_u = area_p + area_g - area_i
iou = area_i / (area_u + 1e-16)
loss_alpha = self.loss_mse(pred[:, 4], target[:, 4])
loss_beta = self.loss_mse(pred[:, 5], target[:, 5])
if self.loss_type == 'iou':
loss = 1 - iou ** 2 + loss_alpha + loss_beta
elif self.loss_type == 'giou':
c_tl = torch.min(pred[:, :2] - pred[:, 2:4] / 2, target[:, :2] -
target[:, 2:4] / 2)
c_br = torch.max(pred[:, :2] + pred[:, 2:4] / 2, target[:, :2] +
target[:, 2:4] / 2)
area_c = torch.prod(c_br - c_tl, 1)
giou = iou - (area_c - area_u) / area_c.clamp(1e-16)
loss = 1 - giou.clamp(min=-1.0, max=1.0) + loss_alpha + loss_beta
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 6]), torch.rand([4, 6])]
def get_init_inputs():
return []
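# --- Reduction demo (added, hedged): 'none' vs 'mean'. ---
# With reduction='mean' the per-box mean of the 'none' losses matches the
# scalar loss, since the MSE terms average the same way. Illustrative only.
if __name__ == '__main__':
    pred, target = torch.rand(4, 6), torch.rand(4, 6)
    per_box = Model(reduction='none')(pred, target)    # shape (4,)
    mean_loss = Model(reduction='mean')(pred, target)  # scalar
    assert torch.allclose(mean_loss, per_box.mean(), atol=1e-6)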
|
ReduceMax | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/y7/cy7jkncvgqkch7rp5inkpciskf4ausuosjdvipi55vmji2a4yndh.py
# Topologically Sorted Source Nodes: [amax], Original ATen: [aten.amax]
# Source node to ATen node mapping:
# amax => amax
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1]), kwargs = {})
triton_poi_fused_amax_0 = async_compile.triton('triton_poi_fused_amax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_amax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_amax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [amax], Original ATen: [aten.amax]
stream0 = get_raw_stream(0)
triton_poi_fused_amax_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class ReduceMax(torch.nn.Module):
def forward(self, inputs, mask=None):
return torch.amax(inputs, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
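# --- Equivalence note (added, hedged): amax vs. max().values. ---
# torch.amax(x, dim=1) returns only the values, matching torch.max(x, 1).values.
# Illustrative only.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    assert torch.equal(ReduceMax()(x), torch.max(x, dim=1).values)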
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_amax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
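# Added explanatory note (not generated output): for the fixed (4, 4, 4, 4)
# input with strides (64, 16, 4, 1), each output element x2 in [0, 64) splits
# into x1 = x2 // 16 (batch index, stride 64) and x0 = x2 % 16 (flattened
# (dim2, dim3) position). The four loads at offsets +0/+16/+32/+48 walk dim=1,
# and the chained maximum() calls reduce those four values into out_ptr0[x2].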
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_amax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ReduceMaxNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
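# --- Parity sketch (added, hedged): fused kernel vs. torch.amax. ---
# Requires a GPU because call() targets device 0; illustrative only.
def _check_reduce_max_parity():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.equal(ReduceMaxNew()(x), torch.amax(x, dim=1))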
| jimthompson5802/ludwig | ReduceMax | false | 3,857 | [
"Apache-2.0"
] | 0 | 8a369328a3f839d9cdb3710be315952c7891d7c0 | https://github.com/jimthompson5802/ludwig/tree/8a369328a3f839d9cdb3710be315952c7891d7c0 | import torch
class Model(torch.nn.Module):
def forward(self, inputs, mask=None):
return torch.amax(inputs, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
IOUloss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lp/clpxlrk2lgzteqniugpoozrbz33fjedsntpm7cek5necq2iuzlbp.py
# Topologically Sorted Source Nodes: [area_p, area_g, add_2, truediv_2, add, truediv_3, add_1, br, truediv, sub, truediv_1, sub_1, tl, sub_2, prod_3, lt, type_1, en, area_i, area_u, add_3, iou, pow_1, loss], Original ATen: [aten.prod, aten.add, aten.div, aten.minimum, aten.sub, aten.maximum, aten.lt, aten._to_copy, aten.mul, aten.pow, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# area_g => prod_1
# area_i => mul
# area_p => prod
# area_u => sub_3
# br => minimum
# en => prod_2
# iou => div_4
# loss => sub_4
# lt => lt
# pow_1 => pow_1
# prod_3 => prod_3
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# tl => maximum
# truediv => div
# truediv_1 => div_1
# truediv_2 => div_2
# truediv_3 => div_3
# type_1 => convert_element_type
# Graph fragment:
# %prod : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%slice_18, 1), kwargs = {})
# %prod_1 : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%slice_20, 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%prod, %prod_1), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_12, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_10, %div_2), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_16, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_14, %div_3), kwargs = {})
# %minimum : [num_users=2] = call_function[target=torch.ops.aten.minimum.default](args = (%add, %add_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_4, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_2, %div), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_8, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_6, %div_1), kwargs = {})
# %maximum : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%sub, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %maximum), kwargs = {})
# %prod_3 : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%sub_2, 1), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Tensor](args = (%maximum, %minimum), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.float32), kwargs = {})
# %prod_2 : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%convert_element_type, 1), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%prod_3, %prod_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %mul), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_3, 1e-16), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_3), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div_4, 2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %pow_1), kwargs = {})
triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0 = async_compile.triton('triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (6*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (2 + (6*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (6*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + (6*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (6*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (6*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (1 + (6*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (6*x0)), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp6 * tmp2
tmp8 = tmp5 + tmp7
tmp9 = triton_helpers.minimum(tmp4, tmp8)
tmp10 = tmp0 - tmp3
tmp11 = tmp5 - tmp7
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tmp9 - tmp12
tmp16 = tmp15 * tmp2
tmp17 = tmp14 + tmp16
tmp20 = tmp19 * tmp2
tmp21 = tmp18 + tmp20
tmp22 = triton_helpers.minimum(tmp17, tmp21)
tmp23 = tmp14 - tmp16
tmp24 = tmp18 - tmp20
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tmp22 - tmp25
tmp27 = tmp13 * tmp26
tmp28 = tmp12 < tmp9
tmp29 = tmp28.to(tl.float32)
tmp30 = tmp25 < tmp22
tmp31 = tmp30.to(tl.float32)
tmp32 = tmp29 * tmp31
tmp33 = tmp27 * tmp32
tmp34 = tmp1 * tmp15
tmp35 = tmp6 * tmp19
tmp36 = tmp34 + tmp35
tmp37 = tmp36 - tmp33
tmp38 = 1e-16
tmp39 = tmp37 + tmp38
tmp40 = tmp33 / tmp39
tmp41 = tmp40 * tmp40
tmp42 = 1.0
tmp43 = tmp42 - tmp41
tl.store(in_out_ptr0 + (x0), tmp43, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 6), (6, 1))
assert_size_stride(arg1_1, (4, 6), (6, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [area_p, area_g, add_2, truediv_2, add, truediv_3, add_1, br, truediv, sub, truediv_1, sub_1, tl, sub_2, prod_3, lt, type_1, en, area_i, area_u, add_3, iou, pow_1, loss], Original ATen: [aten.prod, aten.add, aten.div, aten.minimum, aten.sub, aten.maximum, aten.lt, aten._to_copy, aten.mul, aten.pow, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0.run(buf1, arg0_1, arg1_1, 4, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 6), (6, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 6), (6, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class IOUloss(nn.Module):
def __init__(self, reduction='none', loss_type='iou'):
super(IOUloss, self).__init__()
self.reduction = reduction
self.loss_type = loss_type
def forward(self, pred, target):
assert pred.shape[0] == target.shape[0]
pred = pred.view(-1, 6)
target = target.view(-1, 6)
tl = torch.max(pred[:, :2] - pred[:, 2:4] / 2, target[:, :2] -
target[:, 2:4] / 2)
br = torch.min(pred[:, :2] + pred[:, 2:4] / 2, target[:, :2] +
target[:, 2:4] / 2)
area_p = torch.prod(pred[:, 2:4], 1)
area_g = torch.prod(target[:, 2:4], 1)
en = (tl < br).type(tl.type()).prod(dim=1)
area_i = torch.prod(br - tl, 1) * en
area_u = area_p + area_g - area_i
iou = area_i / (area_u + 1e-16)
if self.loss_type == 'iou':
loss = 1 - iou ** 2
elif self.loss_type == 'giou':
c_tl = torch.min(pred[:, :2] - pred[:, 2:4] / 2, target[:, :2] -
target[:, 2:4] / 2)
c_br = torch.max(pred[:, :2] + pred[:, 2:4] / 2, target[:, :2] +
target[:, 2:4] / 2)
area_c = torch.prod(c_br - c_tl, 1)
giou = iou - (area_c - area_u) / area_c.clamp(1e-16)
loss = 1 - giou.clamp(min=-1.0, max=1.0)
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 6]), torch.rand([4, 6])]
def get_init_inputs():
return [[], {}]
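# --- Worked example (added, hedged): disjoint boxes give loss == 1. ---
# For non-overlapping boxes the `en` indicator is 0, so area_i == 0, iou == 0
# and loss = 1 - 0**2 = 1. Columns 4 and 5 are unused by this loss variant.
if __name__ == '__main__':
    pred = torch.tensor([[0.0, 0.0, 1.0, 1.0, 0.0, 0.0]])
    target = torch.tensor([[5.0, 5.0, 1.0, 1.0, 0.0, 0.0]])
    loss = IOUloss()(pred, target)
    assert torch.allclose(loss, torch.ones(1))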
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 6 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (2 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + (1 + 6 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (3 + 6 * x0), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp6 * tmp2
tmp8 = tmp5 + tmp7
tmp9 = triton_helpers.minimum(tmp4, tmp8)
tmp10 = tmp0 - tmp3
tmp11 = tmp5 - tmp7
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tmp9 - tmp12
tmp16 = tmp15 * tmp2
tmp17 = tmp14 + tmp16
tmp20 = tmp19 * tmp2
tmp21 = tmp18 + tmp20
tmp22 = triton_helpers.minimum(tmp17, tmp21)
tmp23 = tmp14 - tmp16
tmp24 = tmp18 - tmp20
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tmp22 - tmp25
tmp27 = tmp13 * tmp26
tmp28 = tmp12 < tmp9
tmp29 = tmp28.to(tl.float32)
tmp30 = tmp25 < tmp22
tmp31 = tmp30.to(tl.float32)
tmp32 = tmp29 * tmp31
tmp33 = tmp27 * tmp32
tmp34 = tmp1 * tmp15
tmp35 = tmp6 * tmp19
tmp36 = tmp34 + tmp35
tmp37 = tmp36 - tmp33
tmp38 = 1e-16
tmp39 = tmp37 + tmp38
tmp40 = tmp33 / tmp39
tmp41 = tmp40 * tmp40
tmp42 = 1.0
tmp43 = tmp42 - tmp41
tl.store(in_out_ptr0 + x0, tmp43, xmask)
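# Added explanatory note (not generated output): the temporaries above fuse
# the whole IoU loss for one row: tmp9/tmp12 and tmp22/tmp25 are the per-axis
# br/tl bounds, tmp27 = tmp13 * tmp26 is the signed intersection area, tmp32
# is the overlap indicator `en`, tmp36 is area_p + area_g, tmp40 is the IoU
# with the 1e-16 stabiliser, and tmp43 = 1 - iou**2 is the stored loss.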
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 6), (6, 1))
assert_size_stride(arg1_1, (4, 6), (6, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0[
grid(4)](buf1, arg0_1, arg1_1, 4, XBLOCK=4, num_warps=1,
num_stages=1)
del arg0_1
del arg1_1
return buf1,
class IOUlossNew(nn.Module):
def __init__(self, reduction='none', loss_type='iou'):
super(IOUlossNew, self).__init__()
self.reduction = reduction
self.loss_type = loss_type
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| kuazhangxiaoai/YOLOX | IOUloss | false | 3,858 | [
"Apache-2.0"
] | 0 | 7aff49b25a8a80c4c33e941da416500eda72b1a2 | https://github.com/kuazhangxiaoai/YOLOX/tree/7aff49b25a8a80c4c33e941da416500eda72b1a2 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, reduction='none', loss_type='iou'):
super().__init__()
self.reduction = reduction
self.loss_type = loss_type
def forward(self, pred, target):
assert pred.shape[0] == target.shape[0]
pred = pred.view(-1, 6)
target = target.view(-1, 6)
tl = torch.max(pred[:, :2] - pred[:, 2:4] / 2, target[:, :2] -
target[:, 2:4] / 2)
br = torch.min(pred[:, :2] + pred[:, 2:4] / 2, target[:, :2] +
target[:, 2:4] / 2)
area_p = torch.prod(pred[:, 2:4], 1)
area_g = torch.prod(target[:, 2:4], 1)
en = (tl < br).type(tl.type()).prod(dim=1)
area_i = torch.prod(br - tl, 1) * en
area_u = area_p + area_g - area_i
iou = area_i / (area_u + 1e-16)
if self.loss_type == 'iou':
loss = 1 - iou ** 2
elif self.loss_type == 'giou':
c_tl = torch.min(pred[:, :2] - pred[:, 2:4] / 2, target[:, :2] -
target[:, 2:4] / 2)
c_br = torch.max(pred[:, :2] + pred[:, 2:4] / 2, target[:, :2] +
target[:, 2:4] / 2)
area_c = torch.prod(c_br - c_tl, 1)
giou = iou - (area_c - area_u) / area_c.clamp(1e-16)
loss = 1 - giou.clamp(min=-1.0, max=1.0)
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 6]), torch.rand([4, 6])]
def get_init_inputs():
return []
|
BWCEWLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ls/cls2tbcj37wpwheh2vwphvrxjnksqhrpy5c3ixm3i7qzrhg4pqc6.py
# Topologically Sorted Source Nodes: [train_loss, train_mean_loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean]
# Source node to ATen node mapping:
# train_loss => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# train_mean_loss => mean_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mean,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_mean_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = tmp17 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [train_loss, train_mean_loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from typing import Optional
from torch import nn
class LogitsInputsMixin:
@classmethod
def get_loss_inputs(cls):
"""Maps loss to the desired predicted input type."""
        # NOTE (added): LOGITS is not defined in this snippet; it is assumed
        # to come from the surrounding codebase (in Ludwig, the string
        # constant 'logits').
        return LOGITS
class BWCEWLoss(nn.Module, LogitsInputsMixin):
"""Binary weighted cross entropy loss."""
def __init__(self, positive_class_weight: 'Optional[Tensor]'=None,
robust_lambda: 'int'=0, confidence_penalty: 'int'=0, **kwargs):
super().__init__()
self.loss_fn = nn.BCEWithLogitsLoss(pos_weight=
positive_class_weight, **kwargs)
self.robust_lambda = robust_lambda
self.confidence_penalty = confidence_penalty
def forward(self, preds: 'torch.Tensor', target: 'torch.Tensor'):
train_loss = self.loss_fn(preds, target.float())
if self.robust_lambda > 0:
train_loss = (1 - self.robust_lambda
) * train_loss + self.robust_lambda / 2
train_mean_loss = torch.mean(train_loss)
if self.confidence_penalty > 0:
probabilities = torch.sigmoid(preds)
            # NOTE (added): `utils` is not imported in this snippet; it is
            # assumed to refer to Ludwig's loss utilities.
            mean_penalty = utils.mean_confidence_penalty(probabilities, 2)
train_mean_loss += self.confidence_penalty * mean_penalty
return train_mean_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
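# --- Formula check (added, hedged): BCE-with-logits closed form. ---
# The fused kernel in this record evaluates the numerically stable identity
#   bce(x, z) = (1 - z) * x - (min(0, x) - log1p(exp(-|x|)))
#             = max(x, 0) - x * z + log1p(exp(-|x|)),
# which equals -[z * log(sigmoid(x)) + (1 - z) * log(1 - sigmoid(x))].
# Illustrative only.
if __name__ == '__main__':
    x, z = torch.randn(8), torch.rand(8)
    manual = (1 - z) * x - (torch.clamp(x, max=0) - torch.log1p(torch.exp(-x.abs())))
    reference = torch.nn.functional.binary_cross_entropy_with_logits(x, z, reduction='none')
    assert torch.allclose(manual, reference, atol=1e-6)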
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import Tensor
from typing import Optional
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = tmp17 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
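# Added explanatory note (not generated output): tmp12 above is the stable
# BCE-with-logits term (1 - z) * x - (min(0, x) - log1p(exp(-|x|))), i.e.
# max(x, 0) - x * z + log1p(exp(-|x|)); tmp17 averages it over all 256
# elements and tmp18 is the (trivial) outer mean of that scalar.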
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LogitsInputsMixin:
@classmethod
def get_loss_inputs(cls):
"""Maps loss to the desired predicted input type."""
        # NOTE (added): LOGITS is not defined in this snippet; it is assumed
        # to come from the surrounding codebase (in Ludwig, the string
        # constant 'logits').
        return LOGITS
class BWCEWLossNew(nn.Module, LogitsInputsMixin):
"""Binary weighted cross entropy loss."""
def __init__(self, positive_class_weight: 'Optional[Tensor]'=None,
robust_lambda: 'int'=0, confidence_penalty: 'int'=0, **kwargs):
super().__init__()
self.loss_fn = nn.BCEWithLogitsLoss(pos_weight=
positive_class_weight, **kwargs)
self.robust_lambda = robust_lambda
self.confidence_penalty = confidence_penalty
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| jimthompson5802/ludwig | BWCEWLoss | false | 3,859 | [
"Apache-2.0"
] | 0 | 8a369328a3f839d9cdb3710be315952c7891d7c0 | https://github.com/jimthompson5802/ludwig/tree/8a369328a3f839d9cdb3710be315952c7891d7c0 | import torch
from torch import Tensor
from typing import Optional
from torch import nn
class LogitsInputsMixin:
@classmethod
def get_loss_inputs(cls):
"""Maps loss to the desired predicted input type."""
        # NOTE (added): LOGITS is not defined in this snippet; it is assumed
        # to come from the surrounding codebase (in Ludwig, the string
        # constant 'logits').
        return LOGITS
class Model(nn.Module, LogitsInputsMixin):
"""Binary weighted cross entropy loss."""
def __init__(self, positive_class_weight: 'Optional[Tensor]'=None,
robust_lambda: 'int'=0, confidence_penalty: 'int'=0, **kwargs):
super().__init__()
self.loss_fn = nn.BCEWithLogitsLoss(pos_weight=
positive_class_weight, **kwargs)
self.robust_lambda = robust_lambda
self.confidence_penalty = confidence_penalty
def forward(self, preds: 'torch.Tensor', target: 'torch.Tensor'):
train_loss = self.loss_fn(preds, target.float())
if self.robust_lambda > 0:
train_loss = (1 - self.robust_lambda
) * train_loss + self.robust_lambda / 2
train_mean_loss = torch.mean(train_loss)
if self.confidence_penalty > 0:
probabilities = torch.sigmoid(preds)
mean_penalty = utils.mean_confidence_penalty(probabilities, 2)
train_mean_loss += self.confidence_penalty * mean_penalty
return train_mean_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Scaler | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/d7/cd73eyyu3eretbckfbaicbjnmqzedz3jloqhmscamv5ilqjsfl2k.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.sub, aten.mul]
# Source node to ATen node mapping:
# x => sub
# x_1 => mul
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg2_1), kwargs = {})
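# Editorial note: in eager terms the fused kernel computes
#   out = (x.to(torch.float64) - offset) * scale
# in a single pass; the float64 cast mirrors the DoubleTensor parameters.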
triton_poi_fused_mul_sub_0 = async_compile.triton('triton_poi_fused_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp64', 2: '*fp64', 3: '*fp64', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp5 = tl.load(in_ptr2 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp1 = tmp0.to(tl.float64)
tmp4 = tmp1 - tmp3
tmp7 = tmp4 * tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (1, ), (1, ))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float64)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.sub, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sub_0.run(arg1_1, arg0_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float64)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float64)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from abc import ABC
class BaseOperator(ABC):
"""
Abstract class defining the basic structure for operator implementations in Hummingbird.
"""
def __init__(self, regression=False, classification=False, transformer=
False, anomaly_detection=False, **kwargs):
"""
Args:
            regression: Whether the operator is a regression model.
classification: Whether the operator is a classification model.
transformer: Whether the operator is a feature transformer.
anomaly_detection: Whether the operator is an anomaly detection model.
kwargs: Other keyword arguments.
"""
super().__init__()
self.regression = regression
self.classification = classification
self.transformer = transformer
self.anomaly_detection = anomaly_detection
class Scaler(BaseOperator, torch.nn.Module):
"""
    Class implementing Scaler operators in PyTorch. Subtracts an optional offset and multiplies by an optional scale.
"""
def __init__(self, offset, scale, device):
super(Scaler, self).__init__(transformer=True)
self.offset = offset
self.scale = scale
if offset is not None:
self.offset = torch.nn.Parameter(torch.DoubleTensor([offset]),
requires_grad=False)
if scale is not None:
self.scale = torch.nn.Parameter(torch.DoubleTensor([scale]),
requires_grad=False)
def forward(self, x):
if self.offset is not None:
x = x - self.offset
if self.scale is not None:
x = x * self.scale
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'offset': 4, 'scale': 1.0, 'device': 0}]
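# Editorial usage sketch (not part of the original source): the output dtype
# is float64 because offset and scale are stored as DoubleTensor parameters.
if __name__ == '__main__':
    scaler = Scaler(offset=4, scale=1.0, device=0)
    out = scaler(torch.rand(4, 4, 4, 4))
    assert out.dtype == torch.float64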
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from abc import ABC
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp1 = tmp0.to(tl.float64)
tmp4 = tmp1 - tmp3
tmp7 = tmp4 * tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (1,), (1,))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float64)
get_raw_stream(0)
triton_poi_fused_mul_sub_0[grid(256)](arg1_1, arg0_1, arg2_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class BaseOperator(ABC):
"""
Abstract class defining the basic structure for operator implementations in Hummingbird.
"""
def __init__(self, regression=False, classification=False, transformer=
False, anomaly_detection=False, **kwargs):
"""
Args:
            regression: Whether the operator is a regression model.
classification: Whether the operator is a classification model.
transformer: Whether the operator is a feature transformer.
anomaly_detection: Whether the operator is an anomaly detection model.
kwargs: Other keyword arguments.
"""
super().__init__()
self.regression = regression
self.classification = classification
self.transformer = transformer
self.anomaly_detection = anomaly_detection
class ScalerNew(BaseOperator, torch.nn.Module):
"""
    Class implementing Scaler operators in PyTorch. Subtracts an optional offset and multiplies by an optional scale.
"""
def __init__(self, offset, scale, device):
super(ScalerNew, self).__init__(transformer=True)
self.offset = offset
self.scale = scale
if offset is not None:
self.offset = torch.nn.Parameter(torch.DoubleTensor([offset]),
requires_grad=False)
if scale is not None:
self.scale = torch.nn.Parameter(torch.DoubleTensor([scale]),
requires_grad=False)
def forward(self, input_0):
arg0_1 = self.offset
arg2_1 = self.scale
arg1_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| kvenkman/hummingbird | Scaler | false | 3,860 | [
"MIT"
] | 0 | dac08f4ff4a4103df4a8e83329a02f2d804bf34d | https://github.com/kvenkman/hummingbird/tree/dac08f4ff4a4103df4a8e83329a02f2d804bf34d | import torch
from abc import ABC
class BaseOperator(ABC):
"""
Abstract class defining the basic structure for operator implementations in Hummingbird.
"""
def __init__(self, regression=False, classification=False, transformer=
False, anomaly_detection=False, **kwargs):
"""
Args:
            regression: Whether the operator is a regression model.
classification: Whether the operator is a classification model.
transformer: Whether the operator is a feature transformer.
anomaly_detection: Whether the operator is an anomaly detection model.
kwargs: Other keyword arguments.
"""
super().__init__()
self.regression = regression
self.classification = classification
self.transformer = transformer
self.anomaly_detection = anomaly_detection
class Model(BaseOperator, torch.nn.Module):
"""
    Class implementing Scaler operators in PyTorch. Subtracts an optional offset and multiplies by an optional scale.
"""
def __init__(self, offset, scale, device):
super().__init__(transformer=True)
self.offset = offset
self.scale = scale
if offset is not None:
self.offset = torch.nn.Parameter(torch.DoubleTensor([offset]),
requires_grad=False)
if scale is not None:
self.scale = torch.nn.Parameter(torch.DoubleTensor([scale]),
requires_grad=False)
def forward(self, x):
if self.offset is not None:
x = x - self.offset
if self.scale is not None:
x = x * self.scale
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 1.0, 0]
|
CausalConv1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bp/cbpkacuiaelhtemvvh3hnxwj7naxidcr3tkdhdb2ocoakxsxri2x.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_3,), kwargs = {memory_format: torch.contiguous_format})
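# Editorial note: the clone materializes the causal slice x[:, :, :-padding]
# (with the bias addition fused in) into a contiguous output buffer.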
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (7*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7), (28, 7, 1))
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_2, buf1, 64, grid=grid(64), stream=stream0)
del buf0
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn as nn
class CausalConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, dilation=1,
**kwargs):
super().__init__()
self.pad = (kernel_size - 1) * dilation
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
padding=self.pad, **kwargs)
def forward(self, x: 'torch.Tensor'):
x = self.conv(x)
        x = x[:, :, :-self.conv.padding[0]].contiguous()  # note: assumes padding > 0; with kernel_size=1 the slice [:-0] would be empty
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
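# Editorial usage sketch (not part of the original source): the left padding
# of (kernel_size - 1) * dilation plus the trailing slice keeps the output
# length equal to the input length while staying causal.
if __name__ == '__main__':
    conv = CausalConv1d(in_channels=4, out_channels=4, kernel_size=4)
    out = conv(torch.rand(4, 4, 4))
    assert out.shape == (4, 4, 4)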
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 7 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(3,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7), (28, 7, 1))
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](buf0, primals_2, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
return buf1, primals_1, primals_3
class CausalConv1dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, dilation=1,
**kwargs):
super().__init__()
self.pad = (kernel_size - 1) * dilation
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
padding=self.pad, **kwargs)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| kschamplin/astro-classifier-neo | CausalConv1d | false | 3,861 | [
"MIT"
] | 0 | 44fcb8ba41ef549c16360df7fd470f56c42da9b3 | https://github.com/kschamplin/astro-classifier-neo/tree/44fcb8ba41ef549c16360df7fd470f56c42da9b3 | import torch
from torch import nn as nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, dilation=1,
**kwargs):
super().__init__()
self.pad = (kernel_size - 1) * dilation
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size,
padding=self.pad, **kwargs)
def forward(self, x: 'torch.Tensor'):
x = self.conv(x)
        x = x[:, :, :-self.conv.padding[0]].contiguous()  # note: assumes padding > 0; with kernel_size=1 the slice [:-0] would be empty
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Multiply | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/e4/ce4h2z2vthjeez5cqvrjtnep5b7t7jnbbufxqxbrvyhke72ruy4y.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 4), kwargs = {})
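# Editorial note: the score (4) is baked into the kernel as a compile-time
# constant, so `call` computes x * 4 elementwise.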
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from abc import ABC
class BaseOperator(ABC):
"""
Abstract class defining the basic structure for operator implementations in Hummingbird.
"""
def __init__(self, regression=False, classification=False, transformer=
False, anomaly_detection=False, **kwargs):
"""
Args:
            regression: Whether the operator is a regression model.
classification: Whether the operator is a classification model.
transformer: Whether the operator is a feature transformer.
anomaly_detection: Whether the operator is an anomaly detection model.
kwargs: Other keyword arguments.
"""
super().__init__()
self.regression = regression
self.classification = classification
self.transformer = transformer
self.anomaly_detection = anomaly_detection
class Multiply(BaseOperator, torch.nn.Module):
"""
Module used to multiply features in a pipeline by a score.
"""
def __init__(self, score):
super(Multiply, self).__init__()
self.score = score
def forward(self, x):
return x * self.score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'score': 4}]
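# Editorial usage sketch (not part of the original source): the module is a
# constant elementwise scaling of its input.
if __name__ == '__main__':
    mul = Multiply(score=4)
    x = torch.rand(4, 4, 4, 4)
    assert torch.equal(mul(x), x * 4)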
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from abc import ABC
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class BaseOperator(ABC):
"""
Abstract class defining the basic structure for operator implementations in Hummingbird.
"""
def __init__(self, regression=False, classification=False, transformer=
False, anomaly_detection=False, **kwargs):
"""
Args:
            regression: Whether the operator is a regression model.
classification: Whether the operator is a classification model.
transformer: Whether the operator is a feature transformer.
anomaly_detection: Whether the operator is an anomaly detection model.
kwargs: Other keyword arguments.
"""
super().__init__()
self.regression = regression
self.classification = classification
self.transformer = transformer
self.anomaly_detection = anomaly_detection
class MultiplyNew(BaseOperator, torch.nn.Module):
"""
Module used to multiply features in a pipeline by a score.
"""
def __init__(self, score):
super(MultiplyNew, self).__init__()
self.score = score
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| kvenkman/hummingbird | Multiply | false | 3,862 | [
"MIT"
] | 0 | dac08f4ff4a4103df4a8e83329a02f2d804bf34d | https://github.com/kvenkman/hummingbird/tree/dac08f4ff4a4103df4a8e83329a02f2d804bf34d | import torch
from abc import ABC
class BaseOperator(ABC):
"""
Abstract class defining the basic structure for operator implementations in Hummingbird.
"""
def __init__(self, regression=False, classification=False, transformer=
False, anomaly_detection=False, **kwargs):
"""
Args:
            regression: Whether the operator is a regression model.
classification: Whether the operator is a classification model.
transformer: Whether the operator is a feature transformer.
anomaly_detection: Whether the operator is an anomaly detection model.
kwargs: Other keyword arguments.
"""
super().__init__()
self.regression = regression
self.classification = classification
self.transformer = transformer
self.anomaly_detection = anomaly_detection
class Model(BaseOperator, torch.nn.Module):
"""
Module used to multiply features in a pipeline by a score.
"""
def __init__(self, score):
super().__init__()
self.score = score
def forward(self, x):
return x * self.score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ReduceLast | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sz/cszdqebg5pns6veqfzmjstoqvgro5kzpfkqix2nafzrmf7gzh3he.py
# Topologically Sorted Source Nodes: [abs_1, amax, used, length, length_1, sequence_length, setitem], Original ATen: [aten.abs, aten.amax, aten.sign, aten.sum, aten._to_copy, aten.sub, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# abs_1 => abs_1
# amax => amax
# length => sum_1
# length_1 => convert_element_type
# sequence_length => sub
# setitem => full_default, index_put
# used => sign
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%abs_1, [2]), kwargs = {})
# %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%amax,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sign, [1]), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.int32), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convert_element_type, 1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%sub, [%lt], %full_default), kwargs = {})
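# Editorial note (assumption): this fused kernel computes, for each batch row
# and trailing position, clamp(sequence_length_3D(x) - 1, min=0) -- the index
# of the last non-zero timestep -- entirely on the device.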
triton_poi_fused__to_copy_abs_amax_index_put_lift_fresh_sign_sub_sum_0 = async_compile.triton('triton_poi_fused__to_copy_abs_amax_index_put_lift_fresh_sign_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_abs_amax_index_put_lift_fresh_sign_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_abs_amax_index_put_lift_fresh_sign_sub_sum_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp2 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
tmp8 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask)
tmp18 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp20 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
tmp23 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
tmp26 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask)
tmp36 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp38 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
tmp41 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
tmp44 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask)
tmp54 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp56 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask)
tmp59 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask)
tmp62 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask)
tmp1 = tl_math.abs(tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = tl_math.abs(tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = tmp11 < tmp10
tmp13 = tmp12.to(tl.int8)
tmp14 = tmp10 < tmp11
tmp15 = tmp14.to(tl.int8)
tmp16 = tmp13 - tmp15
tmp17 = tmp16.to(tmp10.dtype)
tmp19 = tl_math.abs(tmp18)
tmp21 = tl_math.abs(tmp20)
tmp22 = triton_helpers.maximum(tmp19, tmp21)
tmp24 = tl_math.abs(tmp23)
tmp25 = triton_helpers.maximum(tmp22, tmp24)
tmp27 = tl_math.abs(tmp26)
tmp28 = triton_helpers.maximum(tmp25, tmp27)
tmp29 = tmp11 < tmp28
tmp30 = tmp29.to(tl.int8)
tmp31 = tmp28 < tmp11
tmp32 = tmp31.to(tl.int8)
tmp33 = tmp30 - tmp32
tmp34 = tmp33.to(tmp28.dtype)
tmp35 = tmp17 + tmp34
tmp37 = tl_math.abs(tmp36)
tmp39 = tl_math.abs(tmp38)
tmp40 = triton_helpers.maximum(tmp37, tmp39)
tmp42 = tl_math.abs(tmp41)
tmp43 = triton_helpers.maximum(tmp40, tmp42)
tmp45 = tl_math.abs(tmp44)
tmp46 = triton_helpers.maximum(tmp43, tmp45)
tmp47 = tmp11 < tmp46
tmp48 = tmp47.to(tl.int8)
tmp49 = tmp46 < tmp11
tmp50 = tmp49.to(tl.int8)
tmp51 = tmp48 - tmp50
tmp52 = tmp51.to(tmp46.dtype)
tmp53 = tmp35 + tmp52
tmp55 = tl_math.abs(tmp54)
tmp57 = tl_math.abs(tmp56)
tmp58 = triton_helpers.maximum(tmp55, tmp57)
tmp60 = tl_math.abs(tmp59)
tmp61 = triton_helpers.maximum(tmp58, tmp60)
tmp63 = tl_math.abs(tmp62)
tmp64 = triton_helpers.maximum(tmp61, tmp63)
tmp65 = tmp11 < tmp64
tmp66 = tmp65.to(tl.int8)
tmp67 = tmp64 < tmp11
tmp68 = tmp67.to(tl.int8)
tmp69 = tmp66 - tmp68
tmp70 = tmp69.to(tmp64.dtype)
tmp71 = tmp53 + tmp70
tmp72 = tmp71.to(tl.int32)
tmp73 = tl.full([1], 1, tl.int32)
tmp74 = tmp72 - tmp73
tmp75 = tmp74 < tmp11
tmp76 = tl.where(tmp75, tmp11, tmp74)
tl.store(out_ptr1 + (x2), tmp76, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rf/crff2v5g62gmwittje4cxykzyvx5ipq7hs2mh2ykeyfvspvt4gxy.py
# Topologically Sorted Source Nodes: [gathered], Original ATen: [aten.index]
# Source node to ATen node mapping:
# gathered => index
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%iota_default, %convert_element_type_1]), kwargs = {})
triton_poi_fused_index_1 = async_compile.triton('triton_poi_fused_index_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 16)
x0 = xindex % 16
x1 = (xindex // 16) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert(((0 <= tmp5) & (tmp5 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp5 < 4")
tmp7 = tl.load(in_ptr1 + (x0 + (16*tmp5) + (64*x1)), xmask)
tl.store(out_ptr0 + (x4), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int32)
# Topologically Sorted Source Nodes: [abs_1, amax, used, length, length_1, sequence_length, setitem], Original ATen: [aten.abs, aten.amax, aten.sign, aten.sum, aten._to_copy, aten.sub, aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_abs_amax_index_put_lift_fresh_sign_sub_sum_0.run(arg0_1, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gathered], Original ATen: [aten.index]
triton_poi_fused_index_1.run(buf1, arg0_1, buf2, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
def sequence_length_3D(sequence: 'torch.Tensor') ->torch.Tensor:
used = torch.sign(torch.amax(torch.abs(sequence), dim=2))
length = torch.sum(used, 1)
length = length.int()
return length
class ReduceLast(torch.nn.Module):
def forward(self, inputs, mask=None):
batch_size = inputs.shape[0]
sequence_length = sequence_length_3D(inputs) - 1
sequence_length[sequence_length < 0] = 0
gathered = inputs[torch.arange(batch_size), sequence_length.type(
torch.int64)]
return gathered
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
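# Editorial usage sketch (not part of the original source): for a 3-D batch
# (batch, time, features) whose sequences are zero-padded at the end,
# ReduceLast returns the features of each sequence's last real timestep.
if __name__ == '__main__':
    x = torch.rand(2, 5, 3) + 0.1  # strictly positive, so no step looks empty
    x[0, 3:] = 0.0  # first sequence has true length 3
    out = ReduceLast()(x)
    assert torch.equal(out[0], x[0, 2]) and torch.equal(out[1], x[1, 4])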
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_abs_amax_index_put_lift_fresh_sign_sub_sum_0(
in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp20 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp26 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp36 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp38 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp41 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp44 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp54 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp56 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp59 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp62 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp1 = tl_math.abs(tmp0)
tmp3 = tl_math.abs(tmp2)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = tl_math.abs(tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tl_math.abs(tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = tmp11 < tmp10
tmp13 = tmp12.to(tl.int8)
tmp14 = tmp10 < tmp11
tmp15 = tmp14.to(tl.int8)
tmp16 = tmp13 - tmp15
tmp17 = tmp16.to(tmp10.dtype)
tmp19 = tl_math.abs(tmp18)
tmp21 = tl_math.abs(tmp20)
tmp22 = triton_helpers.maximum(tmp19, tmp21)
tmp24 = tl_math.abs(tmp23)
tmp25 = triton_helpers.maximum(tmp22, tmp24)
tmp27 = tl_math.abs(tmp26)
tmp28 = triton_helpers.maximum(tmp25, tmp27)
tmp29 = tmp11 < tmp28
tmp30 = tmp29.to(tl.int8)
tmp31 = tmp28 < tmp11
tmp32 = tmp31.to(tl.int8)
tmp33 = tmp30 - tmp32
tmp34 = tmp33.to(tmp28.dtype)
tmp35 = tmp17 + tmp34
tmp37 = tl_math.abs(tmp36)
tmp39 = tl_math.abs(tmp38)
tmp40 = triton_helpers.maximum(tmp37, tmp39)
tmp42 = tl_math.abs(tmp41)
tmp43 = triton_helpers.maximum(tmp40, tmp42)
tmp45 = tl_math.abs(tmp44)
tmp46 = triton_helpers.maximum(tmp43, tmp45)
tmp47 = tmp11 < tmp46
tmp48 = tmp47.to(tl.int8)
tmp49 = tmp46 < tmp11
tmp50 = tmp49.to(tl.int8)
tmp51 = tmp48 - tmp50
tmp52 = tmp51.to(tmp46.dtype)
tmp53 = tmp35 + tmp52
tmp55 = tl_math.abs(tmp54)
tmp57 = tl_math.abs(tmp56)
tmp58 = triton_helpers.maximum(tmp55, tmp57)
tmp60 = tl_math.abs(tmp59)
tmp61 = triton_helpers.maximum(tmp58, tmp60)
tmp63 = tl_math.abs(tmp62)
tmp64 = triton_helpers.maximum(tmp61, tmp63)
tmp65 = tmp11 < tmp64
tmp66 = tmp65.to(tl.int8)
tmp67 = tmp64 < tmp11
tmp68 = tmp67.to(tl.int8)
tmp69 = tmp66 - tmp68
tmp70 = tmp69.to(tmp64.dtype)
tmp71 = tmp53 + tmp70
tmp72 = tmp71.to(tl.int32)
tmp73 = tl.full([1], 1, tl.int32)
tmp74 = tmp72 - tmp73
tmp75 = tmp74 < tmp11
tmp76 = tl.where(tmp75, tmp11, tmp74)
tl.store(out_ptr1 + x2, tmp76, xmask)
@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 16
x0 = xindex % 16
x1 = xindex // 16 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask,
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr1 + (x0 + 16 * tmp5 + 64 * x1), xmask)
tl.store(out_ptr0 + x4, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int32)
get_raw_stream(0)
triton_poi_fused__to_copy_abs_amax_index_put_lift_fresh_sign_sub_sum_0[
grid(16)](arg0_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
    triton_poi_fused_index_1[grid(256)](buf1, arg0_1, buf2, 256,
        XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del buf1
return buf2,
def sequence_length_3D(sequence: 'torch.Tensor') ->torch.Tensor:
used = torch.sign(torch.amax(torch.abs(sequence), dim=2))
length = torch.sum(used, 1)
length = length.int()
return length
class ReduceLastNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| jimthompson5802/ludwig | ReduceLast | false | 3,863 | [
"Apache-2.0"
] | 0 | 8a369328a3f839d9cdb3710be315952c7891d7c0 | https://github.com/jimthompson5802/ludwig/tree/8a369328a3f839d9cdb3710be315952c7891d7c0 | import torch
def sequence_length_3D(sequence: 'torch.Tensor') ->torch.Tensor:
used = torch.sign(torch.amax(torch.abs(sequence), dim=2))
length = torch.sum(used, 1)
length = length.int()
return length
class Model(torch.nn.Module):
def forward(self, inputs, mask=None):
batch_size = inputs.shape[0]
sequence_length = sequence_length_3D(inputs) - 1
sequence_length[sequence_length < 0] = 0
gathered = inputs[torch.arange(batch_size), sequence_length.type(
torch.int64)]
return gathered
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lh/clhtaboxxs526aw4bqcb7s6xoig5vzwco55tfg6waaga3ao3elgd.py
# Topologically Sorted Source Nodes: [pairdist], Original ATen: [aten.sub, aten.add, aten.norm]
# Source node to ATen node mapping:
# pairdist => add, pow_1, pow_2, sub, sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub, 1e-06), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3]), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused_add_norm_sub_0 = async_compile.triton('triton_poi_fused_add_norm_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + (x0), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2j/c2jwnancisvjd443gbqq3ciljzmj44l7zohweivmhyduk7bfi7no.py
# Topologically Sorted Source Nodes: [pow_1, mul, sub, sub_1, clamp, pow_2, mul_1, add, contrastive_loss], Original ATen: [aten.pow, aten.mul, aten.rsub, aten.clamp, aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add_1
# clamp => clamp_min
# contrastive_loss => mean
# mul => mul
# mul_1 => mul_1
# pow_1 => pow_3
# pow_2 => pow_4
# sub => sub_1
# sub_1 => sub_2
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %pow_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.5, %pow_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %pow_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {})
triton_per_fused_add_clamp_mean_mul_pow_rsub_1 = async_compile.triton('triton_per_fused_add_clamp_mean_mul_pow_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_mean_mul_pow_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp0
tmp6 = 1.5
tmp7 = tmp6 - tmp1
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp5 * tmp10
tmp12 = tmp3 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pairdist], Original ATen: [aten.sub, aten.add, aten.norm]
stream0 = get_raw_stream(0)
triton_poi_fused_add_norm_sub_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [pow_1, mul, sub, sub_1, clamp, pow_2, mul_1, add, contrastive_loss], Original ATen: [aten.pow, aten.mul, aten.rsub, aten.clamp, aten.add, aten.mean]
triton_per_fused_add_clamp_mean_mul_pow_rsub_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg2_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ContrastiveLoss(nn.Module):
def __init__(self, margin=1.5):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, weight):
pairdist = F.pairwise_distance(output1, output2)
contrastive_loss = torch.mean(weight * torch.pow(pairdist, 2) + (1 -
weight) * torch.pow(torch.clamp(self.margin - pairdist, min=0.0
), 2))
return contrastive_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
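# Editorial usage sketch (not part of the original source): weight == 1 marks
# similar pairs (their distance is penalized directly), weight == 0 marks
# dissimilar pairs (penalized only inside the 1.5 margin).
if __name__ == '__main__':
    loss_fn = ContrastiveLoss()
    out1, out2, weight = get_inputs()
    print(loss_fn(out1, out2, weight))  # scalar tensor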
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + x0, tmp24, xmask)
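# (added note) The next kernel evaluates w * d**2 + (1 - w) * clamp(1.5 - d, 0)**2,
# reusing each of the 64 distances across the leading batch dim (r0 = r2 % 64),
# then reduces all 256 elements to their mean.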
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp0
tmp6 = 1.5
tmp7 = tmp6 - tmp1
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp5 * tmp10
tmp12 = tmp3 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveLossNew(nn.Module):
def __init__(self, margin=1.5):
super(ContrastiveLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
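# Hedged usage sketch (assumes a CUDA device, matching the (4, 4, 4, 4) shapes above):
#   m = ContrastiveLossNew()
#   loss = m(torch.rand(4, 4, 4, 4, device='cuda'),
#            torch.rand(4, 4, 4, 4, device='cuda'),
#            torch.rand(4, 4, 4, 4, device='cuda'))   # scalar tensor on cuda:0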
| kvswim/kv_jhu_cv | ContrastiveLoss | false | 3,864 | [
"MIT"
] | 0 | 2ddf7a9d497aef116a7c043157b8631cea45000d | https://github.com/kvswim/kv_jhu_cv/tree/2ddf7a9d497aef116a7c043157b8631cea45000d | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, margin=1.5):
super().__init__()
self.margin = margin
def forward(self, output1, output2, weight):
pairdist = F.pairwise_distance(output1, output2)
        contrastive_loss = torch.mean(
            weight * torch.pow(pairdist, 2) +
            (1 - weight) * torch.pow(torch.clamp(self.margin - pairdist, min=0.0), 2))
return contrastive_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
SigmoidCrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bv/cbvp5u5kif72bdcqavxdzrj2w7nrgeddwxfpnbuaa5qdr5ycayqv.py
# Topologically Sorted Source Nodes: [element_loss, loss, loss_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.sum, aten.mean]
# Source node to ATen node mapping:
# element_loss => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
# loss => sum_1
# loss_1 => mean
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_2, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
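# (added note) The fragment above is the numerically stable BCEWithLogits identity
#   loss(x, y) = (1 - y) * x - (min(0, x) - log1p(exp(-|x|)))
# which equals max(x, 0) - y * x + log(1 + exp(-|x|)) and avoids exp overflow.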
triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp14 = tmp1 - tmp13
tmp16 = tmp14 * tmp15
tmp17 = triton_helpers.minimum(tmp5, tmp15)
tmp18 = tl_math.abs(tmp15)
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = libdevice.log1p(tmp20)
tmp22 = tmp17 - tmp21
tmp23 = tmp16 - tmp22
tmp24 = tmp12 + tmp23
tmp26 = tmp1 - tmp25
tmp28 = tmp26 * tmp27
tmp29 = triton_helpers.minimum(tmp5, tmp27)
tmp30 = tl_math.abs(tmp27)
tmp31 = -tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = libdevice.log1p(tmp32)
tmp34 = tmp29 - tmp33
tmp35 = tmp28 - tmp34
tmp36 = tmp24 + tmp35
tmp38 = tmp1 - tmp37
tmp40 = tmp38 * tmp39
tmp41 = triton_helpers.minimum(tmp5, tmp39)
tmp42 = tl_math.abs(tmp39)
tmp43 = -tmp42
tmp44 = tl_math.exp(tmp43)
tmp45 = libdevice.log1p(tmp44)
tmp46 = tmp41 - tmp45
tmp47 = tmp40 - tmp46
tmp48 = tmp36 + tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 4.0
tmp53 = tmp51 / tmp52
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp53, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [element_loss, loss, loss_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.sum, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0.run(buf2, arg1_1, arg0_1, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from typing import List
from typing import Optional
from typing import Union
from torch import nn
LOGITS = 'logits'  # assumed value; upstream this is imported from ludwig.constants
class LogitsInputsMixin:
    @classmethod
    def get_loss_inputs(cls):
        """Maps loss to the desired predicted input type."""
        return LOGITS
class SigmoidCrossEntropyLoss(nn.Module, LogitsInputsMixin):
def __init__(self, class_weights: 'Optional[Union[Tensor, List]]'=None,
**kwargs):
"""
Params:
class_weights: List or 1D tensor of length equal to number of classes.
"""
super().__init__()
if class_weights:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none',
pos_weight=torch.Tensor(class_weights))
else:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, preds: 'Tensor', target: 'Tensor') ->Tensor:
        if preds.ndim != 2:
            raise RuntimeError(
                'SigmoidCrossEntropyLoss is currently only supported for 2D tensors.')
element_loss = self.loss_fn(preds.type(torch.float32), target.type(
torch.float32))
loss = torch.sum(element_loss, dim=1)
loss = torch.mean(loss)
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
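# Hedged sketch (added): the fused reduction above, written out in eager PyTorch.
# Assumes the unweighted path (class_weights=None) and the 4x4 inputs from get_inputs().
def _eager_reference_check():
    preds = torch.rand(4, 4)
    target = torch.rand(4, 4)
    elem = nn.functional.binary_cross_entropy_with_logits(
        preds, target, reduction='none')
    return elem.sum(dim=1).mean()  # per-row sum, then batch mean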
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import Tensor
from typing import List
from typing import Optional
from typing import Union
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp14 = tmp1 - tmp13
tmp16 = tmp14 * tmp15
tmp17 = triton_helpers.minimum(tmp5, tmp15)
tmp18 = tl_math.abs(tmp15)
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = libdevice.log1p(tmp20)
tmp22 = tmp17 - tmp21
tmp23 = tmp16 - tmp22
tmp24 = tmp12 + tmp23
tmp26 = tmp1 - tmp25
tmp28 = tmp26 * tmp27
tmp29 = triton_helpers.minimum(tmp5, tmp27)
tmp30 = tl_math.abs(tmp27)
tmp31 = -tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = libdevice.log1p(tmp32)
tmp34 = tmp29 - tmp33
tmp35 = tmp28 - tmp34
tmp36 = tmp24 + tmp35
tmp38 = tmp1 - tmp37
tmp40 = tmp38 * tmp39
tmp41 = triton_helpers.minimum(tmp5, tmp39)
tmp42 = tl_math.abs(tmp39)
tmp43 = -tmp42
tmp44 = tl_math.exp(tmp43)
tmp45 = libdevice.log1p(tmp44)
tmp46 = tmp41 - tmp45
tmp47 = tmp40 - tmp46
tmp48 = tmp36 + tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 4.0
tmp53 = tmp51 / tmp52
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)
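# (added note) The 4-column inner loop is fully unrolled above (four loads per row);
# tl.sum then reduces the RBLOCK=4 row sums and the division by 4.0 is the batch mean.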
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0[grid(1)](
buf2, arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
LOGITS = 'logits'  # assumed value; upstream this is imported from ludwig.constants
class LogitsInputsMixin:
    @classmethod
    def get_loss_inputs(cls):
        """Maps loss to the desired predicted input type."""
        return LOGITS
class SigmoidCrossEntropyLossNew(nn.Module, LogitsInputsMixin):
def __init__(self, class_weights: 'Optional[Union[Tensor, List]]'=None,
**kwargs):
"""
Params:
class_weights: List or 1D tensor of length equal to number of classes.
"""
super().__init__()
if class_weights:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none',
pos_weight=torch.Tensor(class_weights))
else:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| jimthompson5802/ludwig | SigmoidCrossEntropyLoss | false | 3,865 | [
"Apache-2.0"
] | 0 | 8a369328a3f839d9cdb3710be315952c7891d7c0 | https://github.com/jimthompson5802/ludwig/tree/8a369328a3f839d9cdb3710be315952c7891d7c0 | import torch
from torch import Tensor
from typing import List
from typing import Optional
from typing import Union
from torch import nn
LOGITS = 'logits'  # assumed value; upstream this is imported from ludwig.constants
class LogitsInputsMixin:
    @classmethod
    def get_loss_inputs(cls):
        """Maps loss to the desired predicted input type."""
        return LOGITS
class Model(nn.Module, LogitsInputsMixin):
def __init__(self, class_weights: 'Optional[Union[Tensor, List]]'=None,
**kwargs):
"""
Params:
class_weights: List or 1D tensor of length equal to number of classes.
"""
super().__init__()
if class_weights:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none',
pos_weight=torch.Tensor(class_weights))
else:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, preds: 'Tensor', target: 'Tensor') ->Tensor:
        if preds.ndim != 2:
            raise RuntimeError(
                'SigmoidCrossEntropyLoss is currently only supported for 2D tensors.')
element_loss = self.loss_fn(preds.type(torch.float32), target.type(
torch.float32))
loss = torch.sum(element_loss, dim=1)
loss = torch.mean(loss)
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
Layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/66/c66izpnra647e5jefzjwyscrhjy5r6w3ulndngmcr4uo5fvdbhms.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cw/ccwggz4lxsy5z2az3r36xp2kcfwibvyhosokdicsgr57gstrbtxr.py
# Topologically Sorted Source Nodes: [B, y, relu], Original ATen: [aten.repeat, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# B => repeat_1
# relu => relu
# y => add
# Graph fragment:
# %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_2, [4, 1, 1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %repeat_1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_repeat_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_repeat_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_repeat_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_repeat_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [B, y, relu], Original ATen: [aten.repeat, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_repeat_threshold_backward_1.run(buf2, primals_2, buf3, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf2, buf3, reinterpret_tensor(primals_3, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class Layer(nn.Module):
def __init__(self, input_dim, output_dim, p, name=None):
super(Layer, self).__init__()
self.name = name
self.register_parameter(name='w', param=nn.Parameter(torch.empty(1,
input_dim, output_dim), requires_grad=True))
self.register_parameter(name='b', param=nn.Parameter(torch.full([1,
1, output_dim], 0.1), requires_grad=True))
self.p = p
with torch.no_grad():
nn.init.trunc_normal_(self.w, mean=0, std=0.1, a=-0.2, b=0.2)
def forward(self, x):
W = torch.tile(self.w, [self.p, 1, 1])
B = torch.tile(self.b, [self.p, 1, 1])
y = torch.matmul(x, W) + B
return F.relu(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'p': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
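# (added note) The clone above materializes tile(w, [p, 1, 1]) contiguously for bmm;
# the kernel below adds the tiled bias per output channel, applies ReLU, and also
# stores the <= 0 mask consumed by the ReLU backward.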
@triton.jit
def triton_poi_fused_add_relu_repeat_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0),
out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_repeat_threshold_backward_1[grid(256)](buf2,
primals_2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, buf3, reinterpret_tensor(primals_3, (16, 4, 4), (16, 1, 4), 0)
class LayerNew(nn.Module):
def __init__(self, input_dim, output_dim, p, name=None):
super(LayerNew, self).__init__()
self.name = name
self.register_parameter(name='w', param=nn.Parameter(torch.empty(1,
input_dim, output_dim), requires_grad=True))
self.register_parameter(name='b', param=nn.Parameter(torch.full([1,
1, output_dim], 0.1), requires_grad=True))
self.p = p
with torch.no_grad():
nn.init.trunc_normal_(self.w, mean=0, std=0.1, a=-0.2, b=0.2)
def forward(self, input_0):
primals_1 = self.w
primals_2 = self.b
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| kw-lee/VIforSDEs | Layer | false | 3,866 | [
"MIT"
] | 0 | dcba3832aaad0aebc921a3b0628c43046d651629 | https://github.com/kw-lee/VIforSDEs/tree/dcba3832aaad0aebc921a3b0628c43046d651629 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dim, output_dim, p, name=None):
super().__init__()
self.name = name
self.register_parameter(name='w', param=nn.Parameter(torch.empty(1,
input_dim, output_dim), requires_grad=True))
self.register_parameter(name='b', param=nn.Parameter(torch.full([1,
1, output_dim], 0.1), requires_grad=True))
self.p = p
with torch.no_grad():
nn.init.trunc_normal_(self.w, mean=0, std=0.1, a=-0.2, b=0.2)
def forward(self, x):
W = torch.tile(self.w, [self.p, 1, 1])
B = torch.tile(self.b, [self.p, 1, 1])
y = torch.matmul(x, W) + B
return F.relu(y)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
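# (added note) One pass over the fully unrolled 4-element row computes both LayerNorm
# statistics at once: the mean and rsqrt(var + 1e-05).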
# kernel path: runs/run_shard_7/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3r/c3rfy3ljjc2bfodnr5gm65jr7ew6v6kno6w6jzahlupuqxbpvfkw.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/aw/cawvwx3nv7ipnpnf2hcgwz5usu7vsw5yynj5ofrunhktjwqff5vq.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
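# (added note) These clone kernels gather q and k (and later v) out of the packed
# width-12 qkv projection at column offsets 0, 4 and 8, writing contiguous buffers
# for the batched matmuls.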
# kernel path: runs/run_shard_7/inductor_cache/p5/cp5wuljbdcz2dl2xvl4imkn5wmtmrnbb7mnld5glztiqavldlheh.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
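# (added note) Softmax is split in two: the kernel above recomputes the row max from
# the 4 entries and stores exp(x - max); the kernel below divides by the row sum.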
# kernel path: runs/run_shard_7/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vv/cvvhis67uzj3m3ebbd4sgghaemqhihabasphltk5wytqdd6fe74t.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul_1 => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lw/clwfsjrjxeb2gmxy5p3lplvcrvrn37iuw4atjria32bxp2jajrtc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5y/c5yhyv7emyc7i2ozpvns6tsiqcvdzktqqpohy4sedfe7aihkojch.py
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => var_mean_1
# x_1 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
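# triton_poi_fused_add_native_layer_norm_8 first forms the residual
# y = x + (attn_out + proj_bias), then emits its per-row mean and *biased*
# variance (correction=0) over the last axis, matching the var_mean node in the
# graph fragment above. A reference sketch (names are illustrative):
def _residual_var_mean_reference(x, attn_out, proj_bias):
    import torch
    y = x + attn_out + proj_bias
    var, mean = torch.var_mean(y, dim=-1, keepdim=True, correction=0)
    return mean, var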
# kernel path: runs/run_shard_7/inductor_cache/xj/cxjpr2ute76xkk7edg7qlvolks2ggx2xwbrttteralhmvd2xsktw.py
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2
# x_1 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {})
triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
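# triton_poi_fused_add_native_layer_norm_9 recomputes the same residual inline
# and finishes the LayerNorm: (y - mean) * rsqrt(var + eps) * weight + bias,
# broadcasting the per-row statistics against the per-feature affine
# parameters. A reference sketch:
def _layer_norm_affine_reference(y, mean, var, weight, bias, eps=1e-05):
    import torch
    return (y - mean) * torch.rsqrt(var + eps) * weight + bias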
# kernel path: runs/run_shard_7/inductor_cache/b4/cb43jhxvcrefkhdp7ixdoh6nmvez5h55vhlzkxtasuovu5ru7pe5.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_5 => add_6, erf, mul_5, mul_6, mul_7
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_6), kwargs = {})
triton_poi_fused_gelu_10 = async_compile.triton('triton_poi_fused_gelu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
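# triton_poi_fused_gelu_10 implements the exact (erf-based) GELU, not the tanh
# approximation; the constant 0.7071067811865476 is 1/sqrt(2). Reference sketch:
def _gelu_reference(x):
    import torch
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))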
# kernel path: runs/run_shard_7/inductor_cache/pu/cpuql3oz4hmaygynopg7lq7xhfiv7hr7pr4vyzhfpmw34jymdp7q.py
# Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add_2
# x_3 => add_3
# x_9 => add_7
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_15), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
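# triton_poi_fused_add_11 fuses the two remaining bias adds with both residual
# connections of the block: x_9 = (x + attn_out + proj_bias) + (mlp_out +
# fc2_bias), written in place over the fc2 matmul output (in_out_ptr0). Sketch:
def _final_residual_reference(x, attn_out, proj_bias, mlp_out, fc2_bias):
    return (x + attn_out + proj_bias) + (mlp_out + fc2_bias)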
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16, ), (1, ))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf3, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1; del buf1 # reuse
buf14 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf12, primals_6, buf13, buf14, 16, grid=grid(16), stream=stream0)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, grid=grid(64), stream=stream0)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu]
triton_poi_fused_gelu_10.run(buf16, buf17, 256, grid=grid(256), stream=stream0)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf19, primals_3, buf12, primals_6, primals_12, 64, grid=grid(64), stream=stream0)
del primals_12
return (buf19, buf8, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
# Block below references DropPath without defining it; assuming the standard
# timm implementation:
from timm.models.layers import DropPath
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, mask=None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
if mask is not None:
mask = mask.bool()
attn = attn.masked_fill(~mask[:, None, None, :], float('-inf'))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
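# A small usage sketch for Attention above (shapes only, values random). With
# dim=4 and num_heads=4, head_dim is 1 and scale is 1.0, which is why the
# generated softmax kernel multiplies the scores by 1.0:
def _attention_shape_check():
    attn = Attention(dim=4, num_heads=4)
    x = torch.rand(4, 4, 4)  # (B, N, C)
    out, weights = attn(x)
    assert out.shape == (4, 4, 4)
    assert weights.shape == (4, 4, 4, 4)  # (B, num_heads, N, N)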
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn
.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x, mask=None):
_x, attn = self.attn(self.norm1(x), mask=mask)
x = x + self.drop_path(_x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, attn
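# Minimal eager-mode smoke test for Block, mirroring get_inputs() and
# get_init_inputs() below (the helper name is illustrative):
def _block_smoke_test():
    block = Block(dim=4, num_heads=4)
    x = torch.rand(4, 4, 4)
    y, attn = block(x)
    assert y.shape == x.shape and attn.shape == (4, 4, 4, 4)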
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
# BlockNew below references DropPath without defining it; assuming the standard
# timm implementation:
from timm.models.layers import DropPath
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
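# The _softmax_4/_softmax_5 pair implements a numerically stable softmax over
# the last axis in two passes: subtract the row max and exponentiate, then
# divide by the row sum. A reference sketch:
def _softmax_reference(scores):
    m = scores.max(dim=-1, keepdim=True).values
    e = (scores - m).exp()
    return e / e.sum(dim=-1, keepdim=True)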
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
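# clone_2, clone_3 and clone_6 read the packed qkv matmul output of shape
# (tokens, 3*C) at column offsets 0, C and 2*C (C=4 here) -- i.e. a plain split
# along the last axis, followed by the per-head reshapes. Sketch:
def _qkv_unpack_reference(qkv_flat):
    q, k, v = qkv_flat.split(qkv_flat.shape[-1] // 3, dim=-1)
    return q, k, v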
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16,), (1,))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12,
primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12,
primals_6, buf13, buf14, primals_7, primals_8, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0)
del buf7
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_10[grid(256)](buf16, buf17, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12,
primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
return buf19, buf8, primals_3, primals_6, primals_7, reinterpret_tensor(
buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4),
(4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, mask=None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
if mask is not None:
mask = mask.bool()
attn = attn.masked_fill(~mask[:, None, None, :], float('-inf'))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class BlockNew(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn
.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, input_0):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_4 = self.attn.qkv.weight
primals_5 = self.attn.proj.weight
primals_6 = self.attn.proj.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_9 = self.mlp.fc1.weight
primals_10 = self.mlp.fc1.bias
primals_11 = self.mlp.fc2.weight
primals_12 = self.mlp.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0], output[1]
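# Smoke test for the compiled wrapper (a sketch: it requires a CUDA device,
# since call() pins all buffers to cuda:0):
def _blocknew_smoke_test():
    m = BlockNew(dim=4, num_heads=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    y, attn = m(x)
    assert y.shape == (4, 4, 4) and attn.shape == (4, 4, 4, 4)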
| kris927b/ViLT | Block | false | 3,867 | [
"Apache-2.0"
] | 0 | db96f20ebc656f1995aa573cbcbca0fe31f55c42 | https://github.com/kris927b/ViLT/tree/db96f20ebc656f1995aa573cbcbca0fe31f55c42 | import torch
import torch.nn as nn
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, mask=None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
if mask is not None:
mask = mask.bool()
attn = attn.masked_fill(~mask[:, None, None, :], float('-inf'))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Model(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn
.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x, mask=None):
_x, attn = self.attn(self.norm1(x), mask=mask)
x = x + self.drop_path(_x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, attn
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
encoder3 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3r/c3rslv2exiiurkywxt4oxifqli4zlmoxh26mfj7b2xmxbacdj7n6.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/im/cimhbg2d5dyokflxdick6fp7bxrozyixlq7fktorhqhyhxxlqw27.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wa/cwaxgzlsyoq4nciniczgancxu2276femxorri42ynpeycltxtjyo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qg/cqg4z653mpzmif22rwtpmv42y4lbkkxhxjqguwoxl3wb6cn5fn7k.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2y/c2yttjp523555saydyimerku3g5abcwcq5npinqqc2qrabybcqtu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 524288
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
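# The weight-repacking kernels above (triton_poi_fused_0 through triton_poi_fused_4)
# all perform the same layout change: conv weights (O, I, kH, kW) are rewritten
# with the input-channel axis innermost. A rough eager-mode equivalent (a sketch
# of the layout intent, not the exact buffer handling):
def _weights_to_channels_last(w):
    import torch
    return w.to(memory_format=torch.channels_last)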
# kernel path: runs/run_shard_7/inductor_cache/de/cdeln2tj32kp6x2644gzh2ys7hun75bqr36burzkblzjwgs4uqjx.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
# Source node to ATen node mapping:
# x => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, 255.0), kwargs = {})
triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tmp1 = 0.00392156862745098
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp2, ymask)
''', device_str='cuda')
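# What triton_poi_fused_div_5 computes: the constant 0.00392156862745098 is
# 1/255, so the kernel rescales 8-bit pixel values into [0, 1]; judging by the
# output indexing, it also writes the result in channels-last order. Sketch:
def _normalize_input_reference(img):
    import torch
    return (img / 255.0).to(memory_format=torch.channels_last)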
# kernel path: runs/run_shard_7/inductor_cache/l3/cl3q252wgi3hxvxd3zbecpu3422axihexl7ccuy2abyg7ik2fyna.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.constant_pad_nd]
# Source node to ATen node mapping:
# out => convolution
# out_1 => constant_pad_nd
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%div, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_6 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 52272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 198) % 66
x1 = (xindex // 3) % 66
x3 = (xindex // 13068)
x4 = xindex % 198
x0 = xindex % 3
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-195) + x4 + (192*x2) + (12288*x3)), tmp10 & xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + (x0), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tl.store(out_ptr0 + (x6), tmp15, xmask)
''', device_str='cuda')
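# The zero padding is fused via index arithmetic: tmp0/tmp5 shift each output
# coordinate by -1, tmp10 combines the four bounds checks, and out-of-range
# positions fall through the masked load and tl.where to the pad value 0.0.
# The kernel also folds in the conv bias (in_ptr1), since the preceding
# extern_kernels.convolution call in `call` below passes bias=None.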
# kernel path: runs/run_shard_7/inductor_cache/lg/clgtqhalpmnf7uokg33ha3vgwam42nbz7toy6svnopug43m7hqlp.py
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# out_2 => convolution_1
# out_3 => relu
# out_4 => constant_pad_nd_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_relu_7 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 557568
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 2112) % 66
x1 = (xindex // 32) % 66
x3 = (xindex // 139392)
x4 = xindex % 2112
x0 = xindex % 32
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-2080) + x4 + (2048*x2) + (131072*x3)), tmp10 & xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + (x0), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + (x6), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hm/chm5iqtfasmjkzseox33t2dndy7unxmt5p33p4snkvydoeglz3mz.py
# Topologically Sorted Source Nodes: [out_5, pool1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_5 => convolution_2
# pool1 => relu_1
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jb/cjbfymhhk4q6semr2qdwcgrf5mffmc2jfkxk5av3nwbj3r5lm2tj.py
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_6 => getitem_1
# Graph fragment:
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 32
x2 = (xindex // 8192)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (32768*x2)), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (32768*x2)), None)
tmp7 = tl.load(in_ptr0 + (16384 + x0 + (512*x1) + (32768*x2)), None)
tmp12 = tl.load(in_ptr0 + (16640 + x0 + (512*x1) + (32768*x2)), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
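# For reference (a hypothetical eager-mode equivalent, not part of this graph):
# this kernel matches the index half of
#     _, idx = torch.nn.functional.max_pool2d(x, 2, 2, return_indices=True)
# except that it stores a compact int8 offset (0..3) into each 2x2 window
# rather than eager's flat int64 index into the input plane.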
# kernel path: runs/run_shard_7/inductor_cache/xg/cxgrawlzbv3ixwjtl5jz2b745e3rmxs6f6ez2grthc4tuba3po3l.py
# Topologically Sorted Source Nodes: [out_6, out_7], Original ATen: [aten.max_pool2d_with_indices, aten.constant_pad_nd]
# Source node to ATen node mapping:
# out_6 => _low_memory_max_pool2d_with_offsets
# out_7 => constant_pad_nd_2
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%getitem, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1183744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 8704) % 34
x1 = (xindex // 256) % 34
x0 = xindex % 256
x3 = (xindex // 295936)
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-33280) + x0 + (512*x1) + (32768*x2) + (1048576*x3)), tmp10, other=0.0)
tmp12 = tl.load(in_ptr0 + ((-33024) + x0 + (512*x1) + (32768*x2) + (1048576*x3)), tmp10, other=0.0)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr0 + ((-16896) + x0 + (512*x1) + (32768*x2) + (1048576*x3)), tmp10, other=0.0)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.load(in_ptr0 + ((-16640) + x0 + (512*x1) + (32768*x2) + (1048576*x3)), tmp10, other=0.0)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tl.store(out_ptr0 + (x6), tmp19, None)
''', device_str='cuda')
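# Fusing constant_pad_nd with the pooling writes the padded 34x34 tensor
# directly from the 64x64 ReLU output, so the unpadded 32x32 pooled
# intermediate is never materialized for this branch.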
# kernel path: runs/run_shard_7/inductor_cache/65/c65xi35xkv3gngbv7ir3zt444ef2ucucl7sgmrx37a6c6cn63kbm.py
# Topologically Sorted Source Nodes: [out_8, out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# out_10 => constant_pad_nd_3
# out_8 => convolution_3
# out_9 => relu_2
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %constant_pad_nd_3 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu_2, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_relu_11 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2367488
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 17408) % 34
x1 = (xindex // 512) % 34
x3 = (xindex // 591872)
x4 = xindex % 17408
x0 = xindex % 512
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-16896) + x4 + (16384*x2) + (524288*x3)), tmp10, other=0.0)
tmp12 = tl.load(in_ptr1 + (x0), tmp10, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + (x6), tmp17, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k6/ck6glhgwq3r3erzhskbe5t27lp3hhialswo2amg7pwzftcf6cvhg.py
# Topologically Sorted Source Nodes: [out_11, pool2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_11 => convolution_4
# pool2 => relu_3
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/es/cesh2yb7gf6pzelxjhib4cmu37trrs4ynuuqrzt3losbflp5fos2.py
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_12 => getitem_3
# Graph fragment:
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_13 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = (xindex // 512) % 16
x2 = (xindex // 8192)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (1024*x1) + (32768*x2)), None)
tmp1 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (32768*x2)), None)
tmp7 = tl.load(in_ptr0 + (16384 + x0 + (1024*x1) + (32768*x2)), None)
tmp12 = tl.load(in_ptr0 + (16896 + x0 + (1024*x1) + (32768*x2)), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/g3/cg3fxr4aqrrfoa72cdhibt6fhrqoayixfbrcvokbxjfiyfdccgjj.py
# Topologically Sorted Source Nodes: [out_12, out_13], Original ATen: [aten.max_pool2d_with_indices, aten.constant_pad_nd]
# Source node to ATen node mapping:
# out_12 => _low_memory_max_pool2d_with_offsets_1
# out_13 => constant_pad_nd_4
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %constant_pad_nd_4 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%getitem_2, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_14 = async_compile.triton('triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 663552
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 9216) % 18
x1 = (xindex // 512) % 18
x0 = xindex % 512
x3 = (xindex // 165888)
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-33792) + x0 + (1024*x1) + (32768*x2) + (524288*x3)), tmp10, other=0.0)
tmp12 = tl.load(in_ptr0 + ((-33280) + x0 + (1024*x1) + (32768*x2) + (524288*x3)), tmp10, other=0.0)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr0 + ((-17408) + x0 + (1024*x1) + (32768*x2) + (524288*x3)), tmp10, other=0.0)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.load(in_ptr0 + ((-16896) + x0 + (1024*x1) + (32768*x2) + (524288*x3)), tmp10, other=0.0)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tl.store(out_ptr0 + (x6), tmp19, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lr/clrh4wthff4vzy7wxhk26tti2xbqzxqr7rehqns23z3kbk3q2zyj.py
# Topologically Sorted Source Nodes: [out_14, out_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_14 => convolution_5
# out_15 => relu_4
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_4, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_15 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 1024
y1 = (yindex // 1024)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (1024*x2) + (262144*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (256*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (1024*x2) + (262144*y1)), tmp6, xmask)
''', device_str='cuda')
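# This kernel emits two buffers at once: the ReLU output in contiguous NCHW
# layout (out_ptr0, the graph's final result buf21) and the boolean mask
# relu <= 0 in channels-last layout (out_ptr1), which aten.threshold_backward
# consumes to zero gradients during the backward pass.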
# kernel path: runs/run_shard_7/inductor_cache/es/cesypybgzzoq6s6nox3idgljk44lfh2coyqrph4w37tep6bq5rsi.py
# Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_8 => convolution_3
# out_9 => relu_2
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_16 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gk/cgkbtscsdoi76kvfxxn32t6exrxsgg6w3gyc54eycdiamrhnbzeq.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_2 => convolution_1
# out_3 => relu
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_17 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
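# Kernels 16 and 17 recompute bias-add + ReLU from the saved pre-bias conv
# outputs purely to derive the `relu <= 0` masks for threshold_backward; the
# activation values themselves are recomputed but never stored.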
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_3, (3, ), (1, ))
assert_size_stride(primals_4, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (256, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (512, ), (1, ))
assert_size_stride(primals_10, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_11, (512, ), (1, ))
assert_size_stride(primals_12, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_13, (1024, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 96, 9, grid=grid(96, 9), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((256, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
buf2 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_8, buf2, 131072, 9, grid=grid(131072, 9), stream=stream0)
del primals_8
buf3 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_10, buf3, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_10
buf4 = empty_strided_cuda((1024, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_12, buf4, 524288, 9, grid=grid(524288, 9), stream=stream0)
del primals_12
buf5 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
triton_poi_fused_div_5.run(primals_1, buf5, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 3, 64, 64), (12288, 1, 192, 3))
buf7 = empty_strided_cuda((4, 3, 66, 66), (13068, 1, 198, 3), torch.float32)
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_6.run(buf6, primals_3, buf7, 52272, grid=grid(52272), stream=stream0)
del buf6
del primals_3
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf9 = empty_strided_cuda((4, 32, 66, 66), (139392, 1, 2112, 32), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_7.run(buf8, primals_5, buf9, 557568, grid=grid(557568), stream=stream0)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [out_5, pool1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf11, primals_7, 4194304, grid=grid(4194304), stream=stream0)
del primals_7
buf12 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256), torch.int8)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_9.run(buf11, buf12, 1048576, grid=grid(1048576), stream=stream0)
buf13 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256), torch.float32)
# Topologically Sorted Source Nodes: [out_6, out_7], Original ATen: [aten.max_pool2d_with_indices, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_10.run(buf11, buf13, 1183744, grid=grid(1183744), stream=stream0)
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 512, 32, 32), (524288, 1, 16384, 512))
buf15 = empty_strided_cuda((4, 512, 34, 34), (591872, 1, 17408, 512), torch.float32)
# Topologically Sorted Source Nodes: [out_8, out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_11.run(buf14, primals_9, buf15, 2367488, grid=grid(2367488), stream=stream0)
# Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 512, 32, 32), (524288, 1, 16384, 512))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [out_11, pool2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_12.run(buf17, primals_11, 2097152, grid=grid(2097152), stream=stream0)
del primals_11
buf18 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512), torch.int8)
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_13.run(buf17, buf18, 524288, grid=grid(524288), stream=stream0)
buf19 = empty_strided_cuda((4, 512, 18, 18), (165888, 1, 9216, 512), torch.float32)
# Topologically Sorted Source Nodes: [out_12, out_13], Original ATen: [aten.max_pool2d_with_indices, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_14.run(buf17, buf19, 663552, grid=grid(663552), stream=stream0)
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf21 = empty_strided_cuda((4, 1024, 16, 16), (262144, 256, 16, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.bool)
# Topologically Sorted Source Nodes: [out_14, out_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_15.run(buf20, primals_13, buf21, buf22, 4096, 256, grid=grid(4096, 256), stream=stream0)
del buf20
del primals_13
buf23 = empty_strided_cuda((4, 512, 32, 32), (524288, 1, 16384, 512), torch.bool)
# Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_16.run(buf14, primals_9, buf23, 2097152, grid=grid(2097152), stream=stream0)
del buf14
del primals_9
buf24 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32), torch.bool)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_17.run(buf8, primals_5, buf24, 524288, grid=grid(524288), stream=stream0)
del buf8
del primals_5
return (buf21, primals_2, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf22, buf23, buf24, )
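# buf21 (4, 1024, 16, 16) is the forward output; the remaining returned
# tensors (reordered weights, padded activations, pool indices, and the le
# masks buf22-buf24) are retained for the autograd backward pass.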
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
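# Running this file directly benchmarks the compiled graph on rand_strided
# inputs and reports timing through print_performance (times=10, repeat=10).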
import torch
import torch.nn as nn
class encoder3(nn.Module):
def __init__(self, W, v2):
super(encoder3, self).__init__()
self.conv1 = nn.Conv2d(3, 3, 1, 1, 0)
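        # Note: despite the "reflecPad" names below, these are zero-padding
        # layers (nn.ZeroPad2d), matching the constant_pad_nd ops in the
        # compiled graph above.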
self.reflecPad1 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv2 = nn.Conv2d(3, 32 if v2 else int(64 * W), 3, 1, 0)
self.relu2 = nn.ReLU(inplace=True)
self.reflecPad3 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv3 = nn.Conv2d(32 if v2 else int(64 * W), int(64 * W), 3, 1, 0)
self.relu3 = nn.ReLU(inplace=True)
        self.maxPool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=False)
self.reflecPad4 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv4 = nn.Conv2d(int(64 * W), int(128 * W), 3, 1, 0)
self.relu4 = nn.ReLU(inplace=True)
self.reflecPad5 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv5 = nn.Conv2d(int(128 * W), int(128 * W), 3, 1, 0)
self.relu5 = nn.ReLU(inplace=True)
        self.maxPool2 = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=False)
self.reflecPad6 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv6 = nn.Conv2d(int(128 * W), int(256 * W), 3, 1, 0)
self.relu6 = nn.ReLU(inplace=True)
def forward(self, x):
x = x / 255.0
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
pool1 = self.relu3(out)
out = self.maxPool(pool1)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.reflecPad5(out)
out = self.conv5(out)
pool2 = self.relu5(out)
out = self.maxPool2(pool2)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'W': 4, 'v2': 4}]
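# A minimal smoke test for encoder3, wrapped in a function so nothing runs on
# import (_encoder3_smoke_test is a hypothetical helper, not part of the
# generated output; the W/v2 values mirror get_init_inputs above):
def _encoder3_smoke_test():
    model = encoder3(W=4, v2=4)
    out = model(torch.rand(4, 3, 64, 64))
    # 64x64 input through two 2x2 max pools -> 16x16; conv6 emits
    # int(256 * W) = 1024 channels
    assert out.shape == (4, 1024, 16, 16)
    return out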
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
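# The listing below repeats the compiled kernels above with the
# triton_heuristics autotuning/caching decorators stripped down to plain
# @triton.jit; the kernel bodies and index arithmetic are unchanged.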
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = 0.00392156862745098
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp2, ymask)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 52272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 198 % 66
x1 = xindex // 3 % 66
x3 = xindex // 13068
x4 = xindex % 198
x0 = xindex % 3
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-195 + x4 + 192 * x2 + 12288 * x3), tmp10 &
xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + x0, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tl.store(out_ptr0 + x6, tmp15, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_7(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 557568
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 2112 % 66
x1 = xindex // 32 % 66
x3 = xindex // 139392
x4 = xindex % 2112
x0 = xindex % 32
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-2080 + x4 + 2048 * x2 + 131072 * x3), tmp10 &
xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + x0, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + x6, tmp17, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 32
x2 = xindex // 8192
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 32768 * x2), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 32768 * x2), None)
tmp7 = tl.load(in_ptr0 + (16384 + x0 + 512 * x1 + 32768 * x2), None)
tmp12 = tl.load(in_ptr0 + (16640 + x0 + 512 * x1 + 32768 * x2), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp15, None)
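# The bare tl.full(...) and triton_helpers.maximum(tmp12, tmp11) expressions in
# these stripped kernels are dead code: the always-true masks and the final
# running max were assigned but unused in the decorated versions above, so the
# assignments were dropped and only the argmax offsets (tmp15) are stored.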
@triton.jit
def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_10(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8704 % 34
x1 = xindex // 256 % 34
x0 = xindex % 256
x3 = xindex // 295936
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-33280 + x0 + 512 * x1 + 32768 * x2 +
1048576 * x3), tmp10, other=0.0)
tmp12 = tl.load(in_ptr0 + (-33024 + x0 + 512 * x1 + 32768 * x2 +
1048576 * x3), tmp10, other=0.0)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr0 + (-16896 + x0 + 512 * x1 + 32768 * x2 +
1048576 * x3), tmp10, other=0.0)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.load(in_ptr0 + (-16640 + x0 + 512 * x1 + 32768 * x2 +
1048576 * x3), tmp10, other=0.0)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tl.store(out_ptr0 + x6, tmp19, None)
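# The fused kernel above is equivalent, in eager terms, to zero-padding the
# 2x2/stride-2 max-pool result by one pixel on each side -- a minimal sketch
# under that reading (not part of the generated code):
def _pool_then_pad(x):
    import torch.nn.functional as F
    return F.pad(F.max_pool2d(x, kernel_size=2, stride=2), (1, 1, 1, 1))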
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_11(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 17408 % 34
x1 = xindex // 512 % 34
x3 = xindex // 591872
x4 = xindex % 17408
x0 = xindex % 512
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-16896 + x4 + 16384 * x2 + 524288 * x3),
tmp10, other=0.0)
tmp12 = tl.load(in_ptr1 + x0, tmp10, eviction_policy='evict_last',
other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + x6, tmp17, None)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = xindex // 512 % 16
x2 = xindex // 8192
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 32768 * x2), None)
tmp1 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 32768 * x2), None)
tmp7 = tl.load(in_ptr0 + (16384 + x0 + 1024 * x1 + 32768 * x2), None)
tmp12 = tl.load(in_ptr0 + (16896 + x0 + 1024 * x1 + 32768 * x2), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
    triton_helpers.maximum(tmp12, tmp11)  # final window max computed but discarded; this kernel stores only the argmax index tmp15
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_14(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 9216 % 18
x1 = xindex // 512 % 18
x0 = xindex % 512
x3 = xindex // 165888
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-33792 + x0 + 1024 * x1 + 32768 * x2 +
524288 * x3), tmp10, other=0.0)
tmp12 = tl.load(in_ptr0 + (-33280 + x0 + 1024 * x1 + 32768 * x2 +
524288 * x3), tmp10, other=0.0)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr0 + (-17408 + x0 + 1024 * x1 + 32768 * x2 +
524288 * x3), tmp10, other=0.0)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.load(in_ptr0 + (-16896 + x0 + 1024 * x1 + 32768 * x2 +
524288 * x3), tmp10, other=0.0)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tl.store(out_ptr0 + x6, tmp19, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 1024
y1 = yindex // 1024
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 1024 * x2 + 262144 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 256 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 1024 * x2 + 262144 * y1), tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
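# The three *_threshold_backward_* kernels above also (or only) record the
# boolean mask `relu(x + bias) <= 0`; autograd's threshold_backward consumes
# it to zero the incoming gradient wherever the ReLU was inactive. A minimal
# eager-mode sketch (illustrative only; the helper name is hypothetical):
def _relu_with_backward_mask(x, bias):
    import torch
    y = torch.relu(x + bias)
    return y, y <= 0  # True exactly where the backward pass must emit 0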
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_3, (3,), (1,))
assert_size_stride(primals_4, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (256, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_13, (1024,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(96, 9)](primals_4, buf0, 96, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((256, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_1[grid(8192, 9)](primals_6, buf1, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(131072, 9)](primals_8, buf2, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_10, buf3, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((1024, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_4[grid(524288, 9)](primals_12, buf4, 524288, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf5 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_div_5[grid(12, 4096)](primals_1, buf5, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf6 = extern_kernels.convolution(buf5, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 3, 64, 64), (12288, 1, 192, 3))
buf7 = empty_strided_cuda((4, 3, 66, 66), (13068, 1, 198, 3), torch
.float32)
triton_poi_fused_constant_pad_nd_convolution_6[grid(52272)](buf6,
primals_3, buf7, 52272, XBLOCK=512, num_warps=4, num_stages=1)
del buf6
del primals_3
buf8 = extern_kernels.convolution(buf7, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf9 = empty_strided_cuda((4, 32, 66, 66), (139392, 1, 2112, 32),
torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_7[grid(557568)](buf8,
primals_5, buf9, 557568, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_8[grid(4194304)](buf11, primals_7,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_9[grid(1048576)](buf11,
buf12, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
buf13 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256),
torch.float32)
triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_10[grid(
1183744)](buf11, buf13, 1183744, XBLOCK=1024, num_warps=4,
num_stages=1)
buf14 = extern_kernels.convolution(buf13, buf2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 512, 32, 32), (524288, 1, 16384, 512))
buf15 = empty_strided_cuda((4, 512, 34, 34), (591872, 1, 17408, 512
), torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_11[grid(2367488)](
buf14, primals_9, buf15, 2367488, XBLOCK=1024, num_warps=4,
num_stages=1)
buf16 = extern_kernels.convolution(buf15, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 512, 32, 32), (524288, 1, 16384, 512))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_12[grid(2097152)](buf17,
primals_11, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf18 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_13[grid(524288)](buf17,
buf18, 524288, XBLOCK=512, num_warps=8, num_stages=1)
buf19 = empty_strided_cuda((4, 512, 18, 18), (165888, 1, 9216, 512),
torch.float32)
triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_14[grid(
663552)](buf17, buf19, 663552, XBLOCK=1024, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf19, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf21 = empty_strided_cuda((4, 1024, 16, 16), (262144, 256, 16, 1),
torch.float32)
buf22 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384,
1024), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(4096, 256)
](buf20, primals_13, buf21, buf22, 4096, 256, XBLOCK=32, YBLOCK
=32, num_warps=4, num_stages=1)
del buf20
del primals_13
buf23 = empty_strided_cuda((4, 512, 32, 32), (524288, 1, 16384, 512
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_16[grid(2097152)](
buf14, primals_9, buf23, 2097152, XBLOCK=512, num_warps=8,
num_stages=1)
del buf14
del primals_9
buf24 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_17[grid(524288)](
buf8, primals_5, buf24, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf8
del primals_5
return (buf21, primals_2, buf0, buf1, buf2, buf3, buf4, buf5, buf7,
buf9, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf22, buf23,
buf24)
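# Layout note for `call` above: buffers such as buf8 with shape
# (4, 32, 64, 64) and strides (131072, 1, 2048, 32) are channels-last in
# memory (stride 1 on the channel axis), which is why triton_poi_fused_0..4
# repack the convolution weights up front before the
# extern_kernels.convolution calls.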
class encoder3New(nn.Module):
def __init__(self, W, v2):
super(encoder3New, self).__init__()
self.conv1 = nn.Conv2d(3, 3, 1, 1, 0)
self.reflecPad1 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv2 = nn.Conv2d(3, 32 if v2 else int(64 * W), 3, 1, 0)
self.relu2 = nn.ReLU(inplace=True)
self.reflecPad3 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv3 = nn.Conv2d(32 if v2 else int(64 * W), int(64 * W), 3, 1, 0)
self.relu3 = nn.ReLU(inplace=True)
self.maxPool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices
=False)
self.reflecPad4 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv4 = nn.Conv2d(int(64 * W), int(128 * W), 3, 1, 0)
self.relu4 = nn.ReLU(inplace=True)
self.reflecPad5 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv5 = nn.Conv2d(int(128 * W), int(128 * W), 3, 1, 0)
self.relu5 = nn.ReLU(inplace=True)
self.maxPool2 = nn.MaxPool2d(kernel_size=2, stride=2,
return_indices=False)
self.reflecPad6 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv6 = nn.Conv2d(int(128 * W), int(256 * W), 3, 1, 0)
self.relu6 = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.conv6.weight
primals_13 = self.conv6.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| kamieen03/style-transfer-server | encoder3 | false | 3,868 | [
"BSD-2-Clause"
] | 0 | 91727ec62080215a0b870ce043faf0657137b84b | https://github.com/kamieen03/style-transfer-server/tree/91727ec62080215a0b870ce043faf0657137b84b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, W, v2):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 1, 1, 0)
self.reflecPad1 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv2 = nn.Conv2d(3, 32 if v2 else int(64 * W), 3, 1, 0)
self.relu2 = nn.ReLU(inplace=True)
self.reflecPad3 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv3 = nn.Conv2d(32 if v2 else int(64 * W), int(64 * W), 3, 1, 0)
self.relu3 = nn.ReLU(inplace=True)
self.maxPool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices
=False)
self.reflecPad4 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv4 = nn.Conv2d(int(64 * W), int(128 * W), 3, 1, 0)
self.relu4 = nn.ReLU(inplace=True)
self.reflecPad5 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv5 = nn.Conv2d(int(128 * W), int(128 * W), 3, 1, 0)
self.relu5 = nn.ReLU(inplace=True)
self.maxPool2 = nn.MaxPool2d(kernel_size=2, stride=2,
return_indices=False)
self.reflecPad6 = nn.ZeroPad2d((1, 1, 1, 1))
self.conv6 = nn.Conv2d(int(128 * W), int(256 * W), 3, 1, 0)
self.relu6 = nn.ReLU(inplace=True)
def forward(self, x):
x = x / 255.0
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
pool1 = self.relu3(out)
out = self.maxPool(pool1)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.reflecPad5(out)
out = self.conv5(out)
pool2 = self.relu5(out)
out = self.maxPool2(pool2)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
return out
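# Example usage (a minimal sketch; the W and v2 values are illustrative): the
# forward pass divides by 255.0, so inputs are expected as raw pixel values
# in [0, 255].
#   model = Model(W=1.0, v2=True)
#   features = model(torch.rand(4, 3, 64, 64) * 255)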
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [4, 4]
|
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xv/cxvk5cjocubb5sooxq477utoh4ypieadexokpcfknpbyhqkqpfmc.py
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# input_2 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.22916666666666666), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
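# The negative slope 0.22916666666666666 hard-coded in these leaky_relu
# kernels is nn.RReLU's evaluation-mode behaviour: with the default bounds
# lower=1/8 and upper=1/3, eval-time RReLU reduces to LeakyReLU with slope
# (lower + upper) / 2 = (1/8 + 1/3) / 2 = 0.22916666666666666.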
# kernel path: runs/run_shard_7/inductor_cache/uc/cuc4ysopuao32bavjilaknjdnsbv5keq47d5maeo2kius4cooxo2.py
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# input_4 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.22916666666666666), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_3, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tx/ctx7of2xuxmqal3s7vzpeqvui4z7kco2rlkga7utjk5xkfvyis54.py
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# input_6 => gt_2, mul_2, where_2
# Graph fragment:
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_5, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 0.22916666666666666), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %view_5, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sb/csbbn2sf7v6eutogmmhksp5x7rwd7z3vppgfx2mbu7v3njic7bqh.py
# Topologically Sorted Source Nodes: [action], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# action => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_7,), kwargs = {})
triton_poi_fused_tanh_3 = async_compile.triton('triton_poi_fused_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (64, 128), (128, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (4, 64), (64, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 16384, grid=grid(16384), stream=stream0)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 8192, grid=grid(8192), stream=stream0)
del buf3
del primals_5
buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf6, primals_7, buf7, buf8, 4096, grid=grid(4096), stream=stream0)
del buf6
del primals_7
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf8, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [action], Original ATen: [aten.tanh]
triton_poi_fused_tanh_3.run(buf10, primals_9, 256, grid=grid(256), stream=stream0)
del primals_9
return (buf10, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 256), (256, 1), 0), buf4, reinterpret_tensor(buf5, (64, 128), (128, 1), 0), buf7, reinterpret_tensor(buf8, (64, 64), (64, 1), 0), buf10, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from collections import OrderedDict
class Actor(nn.Module):
def __init__(self, state_size, action_size, actor_fc_sizes=[256, 128, 64]):
super(Actor, self).__init__()
sequence_dict_actor = OrderedDict()
sequence_dict_actor['fc0'] = nn.Linear(state_size, actor_fc_sizes[0])
sequence_dict_actor['fc_rrelu0'] = nn.RReLU()
for i, actor_fc_size in enumerate(actor_fc_sizes):
if i == len(actor_fc_sizes) - 1:
break
sequence_dict_actor['fc{}'.format(i + 1)] = nn.Linear(actor_fc_size
, actor_fc_sizes[i + 1])
sequence_dict_actor['fc_rrelu{}'.format(i + 1)] = nn.RReLU()
sequence_dict_actor['logit'] = nn.Linear(actor_fc_sizes[-1],
action_size)
self.fc_actor = nn.Sequential(sequence_dict_actor)
self.tanh = nn.Tanh()
def forward(self, common_res):
actor_res = self.fc_actor(common_res)
action = self.tanh(actor_res)
return action
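# Example usage (a minimal sketch): nn.Linear acts on the last dimension, and
# the final tanh bounds each action component in (-1, 1).
#   actor = Actor(state_size=4, action_size=4)
#   action = actor(torch.rand(4, 4, 4, 4))  # -> shape (4, 4, 4, 4)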
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (64, 128), (128, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (4, 64), (64, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(16384)](buf0, primals_2, buf1,
buf2, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.float32)
triton_poi_fused_leaky_relu_1[grid(8192)](buf3, primals_5, buf4,
buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.
float32)
triton_poi_fused_leaky_relu_2[grid(4096)](buf6, primals_7, buf7,
buf8, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_7
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_8, (64, 4), (1, 64), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf9
triton_poi_fused_tanh_3[grid(256)](buf10, primals_9, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
return buf10, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 256), (256, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 128), (128, 1), 0
), buf7, reinterpret_tensor(buf8, (64, 64), (64, 1), 0
), buf10, primals_8, primals_6, primals_4
class ActorNew(nn.Module):
def __init__(self, state_size, action_size, actor_fc_sizes=[256, 128, 64]):
super(ActorNew, self).__init__()
sequence_dict_actor = OrderedDict()
sequence_dict_actor['fc0'] = nn.Linear(state_size, actor_fc_sizes[0])
sequence_dict_actor['fc_rrelu0'] = nn.RReLU()
for i, actor_fc_size in enumerate(actor_fc_sizes):
if i == len(actor_fc_sizes) - 1:
break
sequence_dict_actor['fc{}'.format(i + 1)] = nn.Linear(actor_fc_size
, actor_fc_sizes[i + 1])
sequence_dict_actor['fc_rrelu{}'.format(i + 1)] = nn.RReLU()
sequence_dict_actor['logit'] = nn.Linear(actor_fc_sizes[-1],
action_size)
self.fc_actor = nn.Sequential(sequence_dict_actor)
self.tanh = nn.Tanh()
def forward(self, input_0):
primals_1 = self.fc_actor.fc0.weight
primals_2 = self.fc_actor.fc0.bias
primals_4 = self.fc_actor.fc1.weight
primals_5 = self.fc_actor.fc1.bias
primals_6 = self.fc_actor.fc2.weight
primals_7 = self.fc_actor.fc2.bias
primals_8 = self.fc_actor.logit.weight
primals_9 = self.fc_actor.logit.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| kurohi/deepreinforcement-udacity | Actor | false | 3,869 | [
"MIT"
] | 0 | ea8bfcce9a36ca41aa0d7595326b915a494ed5f2 | https://github.com/kurohi/deepreinforcement-udacity/tree/ea8bfcce9a36ca41aa0d7595326b915a494ed5f2 | import torch
import torch.nn as nn
from collections import OrderedDict
class Model(nn.Module):
def __init__(self, state_size, action_size, actor_fc_sizes=[256, 128, 64]):
super().__init__()
sequence_dict_actor = OrderedDict()
sequence_dict_actor['fc0'] = nn.Linear(state_size, actor_fc_sizes[0])
sequence_dict_actor['fc_rrelu0'] = nn.RReLU()
for i, actor_fc_size in enumerate(actor_fc_sizes):
if i == len(actor_fc_sizes) - 1:
break
sequence_dict_actor['fc{}'.format(i + 1)] = nn.Linear(actor_fc_size
, actor_fc_sizes[i + 1])
sequence_dict_actor['fc_rrelu{}'.format(i + 1)] = nn.RReLU()
sequence_dict_actor['logit'] = nn.Linear(actor_fc_sizes[-1],
action_size)
self.fc_actor = nn.Sequential(sequence_dict_actor)
self.tanh = nn.Tanh()
def forward(self, common_res):
actor_res = self.fc_actor(common_res)
action = self.tanh(actor_res)
return action
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fv/cfvetaedgv74kp75lxerv5neciwqnjmm4hos5n22v7vev4p4zw2o.py
# Topologically Sorted Source Nodes: [common_res_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# common_res_1 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%addmm, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (260*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mk/cmk77m3r3ze2mrmpvia72szqtikcdfkcrmgskh6rpvudyzl5gsrn.py
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# input_2 => gt, mul, where
# Graph fragment:
# %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_6), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_2, 0.22916666666666666), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_2, %mul), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nl/cnlcz5j7iizcao57byyw72hysqynwd3ekkpet6aq7h37a2on7dgv.py
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# input_4 => gt_1, mul_1, where_1
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_8), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.22916666666666666), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_tensor_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wu/cwuygupncxbbcn43yrnkgdl4n4buyhccmnpzavr4e2cuitvgzow6.py
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# input_6 => gt_2, mul_2, where_2
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_10), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.22916666666666666), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_tensor, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (256, 260), (260, 1))
assert_size_stride(primals_6, (256, ), (1, ))
assert_size_stride(primals_7, (128, 256), (256, 1))
assert_size_stride(primals_8, (128, ), (1, ))
assert_size_stride(primals_9, (64, 128), (128, 1))
assert_size_stride(primals_10, (64, ), (1, ))
assert_size_stride(primals_11, (1, 64), (64, 1))
assert_size_stride(primals_12, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 260), (260, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 256), (260, 1), 0) # alias
# Topologically Sorted Source Nodes: [common_res], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = reinterpret_tensor(buf2, (4, 4), (260, 1), 256) # alias
# Topologically Sorted Source Nodes: [common_res_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_4, buf1, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (260, 256), (1, 260), 0), out=buf3)
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, primals_6, buf4, buf5, 1024, grid=grid(1024), stream=stream0)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 128), (1, 256), 0), out=buf6)
buf7 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf6, primals_8, buf7, buf8, 512, grid=grid(512), stream=stream0)
del buf6
del primals_8
buf9 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_9, (128, 64), (1, 128), 0), out=buf9)
buf10 = empty_strided_cuda((4, 64), (64, 1), torch.bool)
buf11 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_3.run(buf9, primals_10, buf10, buf11, 256, grid=grid(256), stream=stream0)
del buf9
del primals_10
buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, buf11, reinterpret_tensor(primals_11, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf13)
del primals_12
return (buf13, primals_3, buf2, buf4, buf5, buf7, buf8, buf10, buf11, primals_11, primals_9, primals_7, primals_5, )
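# Note on `call` above: the torch.cat from the source is realized without
# copying the addmm result -- buf0 and buf1 alias column slices (offsets 0
# and 256) of the single (4, 260) buffer buf2, so the first-layer output and
# the action land directly in their concatenated positions.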
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, 260), (260, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from collections import OrderedDict
class Critic(nn.Module):
def __init__(self, state_size, action_size, critic_fc_sizes=[256, 128, 64]
):
super(Critic, self).__init__()
sequence_dict_critic = OrderedDict()
self.critic_first_layer = nn.Linear(state_size, critic_fc_sizes[0])
sequence_dict_critic['fc0'] = nn.Linear(critic_fc_sizes[0] +
action_size, critic_fc_sizes[0])
sequence_dict_critic['fc_rrelu0'] = nn.RReLU()
for i, critic_fc_size in enumerate(critic_fc_sizes):
if i == len(critic_fc_sizes) - 1:
break
sequence_dict_critic['fc{}'.format(i + 1)] = nn.Linear(
critic_fc_size, critic_fc_sizes[i + 1])
sequence_dict_critic['fc_rrelu{}'.format(i + 1)] = nn.RReLU()
sequence_dict_critic['logit'] = nn.Linear(critic_fc_sizes[-1], 1)
self.fc_critic = nn.Sequential(sequence_dict_critic)
def forward(self, common_res, action):
common_res = self.critic_first_layer(common_res)
        # Bug: nn.RReLU(common_res) constructs an nn.RReLU module here, passing
        # the tensor as its `lower` bound instead of applying the activation;
        # `.lower` then just recovers that tensor, so no activation is applied.
        # The compiled graph above matches this: addmm feeds the cat directly.
        common_res = nn.RReLU(common_res)
        common_res = torch.cat((common_res.lower, action), dim=1)
value = self.fc_critic(common_res)
return value
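# A sketch of the forward pass the author likely intended (an assumption:
# applying the activation functionally rather than instantiating a module):
#     common_res = torch.nn.functional.rrelu(self.critic_first_layer(common_res))
#     common_res = torch.cat((common_res, action), dim=1)
#     value = self.fc_critic(common_res)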
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
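    # Copies the (4, 4) action tensor into columns 256:260 of the (4, 260)
    # concat buffer: out_ptr0 is the buf1 alias at offset 256, and each row
    # uses the parent buffer's stride of 260.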
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 260 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
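    # 0.22916666666666666 == (1/8 + 1/3) / 2 == 11/48: the deterministic
    # negative slope RReLU uses in eval mode (mean of its default lower/upper
    # bounds), which inductor lowers as a leaky ReLU. The same constant
    # appears in the two kernels below.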
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.22916666666666666
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (256, 260), (260, 1))
assert_size_stride(primals_6, (256,), (1,))
assert_size_stride(primals_7, (128, 256), (256, 1))
assert_size_stride(primals_8, (128,), (1,))
assert_size_stride(primals_9, (64, 128), (128, 1))
assert_size_stride(primals_10, (64,), (1,))
assert_size_stride(primals_11, (1, 64), (64, 1))
assert_size_stride(primals_12, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 260), (260, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 256), (260, 1), 0)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 256), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = reinterpret_tensor(buf2, (4, 4), (260, 1), 256)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](primals_4, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (260, 256), (
1, 260), 0), out=buf3)
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(1024)](buf3, primals_6, buf4,
buf5, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 128), (
1, 256), 0), out=buf6)
buf7 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
triton_poi_fused_leaky_relu_2[grid(512)](buf6, primals_8, buf7,
buf8, 512, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
del primals_8
buf9 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_9, (128, 64), (1,
128), 0), out=buf9)
buf10 = empty_strided_cuda((4, 64), (64, 1), torch.bool)
buf11 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
triton_poi_fused_leaky_relu_3[grid(256)](buf9, primals_10, buf10,
buf11, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf9
del primals_10
buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_12, buf11, reinterpret_tensor(
primals_11, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf13)
del primals_12
return (buf13, primals_3, buf2, buf4, buf5, buf7, buf8, buf10, buf11,
primals_11, primals_9, primals_7, primals_5)
class CriticNew(nn.Module):
def __init__(self, state_size, action_size, critic_fc_sizes=[256, 128, 64]
):
super(CriticNew, self).__init__()
sequence_dict_critic = OrderedDict()
self.critic_first_layer = nn.Linear(state_size, critic_fc_sizes[0])
sequence_dict_critic['fc0'] = nn.Linear(critic_fc_sizes[0] +
action_size, critic_fc_sizes[0])
sequence_dict_critic['fc_rrelu0'] = nn.RReLU()
for i, critic_fc_size in enumerate(critic_fc_sizes):
if i == len(critic_fc_sizes) - 1:
break
sequence_dict_critic['fc{}'.format(i + 1)] = nn.Linear(
critic_fc_size, critic_fc_sizes[i + 1])
sequence_dict_critic['fc_rrelu{}'.format(i + 1)] = nn.RReLU()
sequence_dict_critic['logit'] = nn.Linear(critic_fc_sizes[-1], 1)
self.fc_critic = nn.Sequential(sequence_dict_critic)
def forward(self, input_0, input_1):
primals_1 = self.critic_first_layer.weight
primals_2 = self.critic_first_layer.bias
primals_5 = self.fc_critic.fc0.weight
primals_6 = self.fc_critic.fc0.bias
primals_7 = self.fc_critic.fc1.weight
primals_8 = self.fc_critic.fc1.bias
primals_9 = self.fc_critic.fc2.weight
primals_10 = self.fc_critic.fc2.bias
primals_11 = self.fc_critic.logit.weight
primals_12 = self.fc_critic.logit.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| kurohi/deepreinforcement-udacity | Critic | false | 3,870 | [
"MIT"
] | 0 | ea8bfcce9a36ca41aa0d7595326b915a494ed5f2 | https://github.com/kurohi/deepreinforcement-udacity/tree/ea8bfcce9a36ca41aa0d7595326b915a494ed5f2 | import torch
import torch.nn as nn
from collections import OrderedDict
class Model(nn.Module):
def __init__(self, state_size, action_size, critic_fc_sizes=[256, 128, 64]
):
super().__init__()
sequence_dict_critic = OrderedDict()
self.critic_first_layer = nn.Linear(state_size, critic_fc_sizes[0])
sequence_dict_critic['fc0'] = nn.Linear(critic_fc_sizes[0] +
action_size, critic_fc_sizes[0])
sequence_dict_critic['fc_rrelu0'] = nn.RReLU()
for i, critic_fc_size in enumerate(critic_fc_sizes):
if i == len(critic_fc_sizes) - 1:
break
sequence_dict_critic['fc{}'.format(i + 1)] = nn.Linear(
critic_fc_size, critic_fc_sizes[i + 1])
sequence_dict_critic['fc_rrelu{}'.format(i + 1)] = nn.RReLU()
sequence_dict_critic['logit'] = nn.Linear(critic_fc_sizes[-1], 1)
self.fc_critic = nn.Sequential(sequence_dict_critic)
def forward(self, common_res, action):
common_res = self.critic_first_layer(common_res)
        # Same quirk as noted earlier: nn.RReLU(common_res) builds a module and
        # `.lower` recovers the unactivated tensor, so no activation is applied.
        common_res = nn.RReLU(common_res)
        common_res = torch.cat((common_res.lower, action), dim=1)
value = self.fc_critic(common_res)
return value
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
Model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qj/cqj5yvjbbpdgfd7ow3ktvna43ezxa65xqqqkp5npgbvyayjqd4nq.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 86400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 6
x0 = xindex % 3600
x4 = (xindex // 3600)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7b/c7bzvhv2qr6xicnbqb3t7xqu2zvwjnq3bidit4l3mlc5uafk5g7x.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 21600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = (xindex // 30) % 30
x4 = (xindex // 900)
x3 = (xindex // 5400)
x5 = xindex % 5400
tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x1) + (3616*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x1) + (3616*x4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x1) + (3616*x4)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x1) + (3616*x4)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + (5408*x3)), tmp6, xmask)
tl.store(out_ptr1 + (x5 + (5504*x3)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sy/csyhslvzw3etswfcdc7kxu4qedv6uoivmetsslsqvshtwe5lhfp3.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 676) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/m7/cm77y46phzcdy2yei4tss3zu2fw3rfcpzz5sxrbmxjomu7thvmgw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 10816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 13
x3 = (xindex // 13)
x2 = (xindex // 2704)
x4 = xindex % 2704
tmp0 = tl.load(in_ptr0 + ((2*x0) + (52*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (26 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (27 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x4 + (2816*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x4 + (2720*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3l/c3loprzowqrxjnzolnvkzw4uopmffegsvyydxuel7mlq5sfif3h4.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 520
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2z/c2zd5wdakk3rn4qsoqkbactto4fyki4ynhbuhyh2k4avjrlcknb3.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 108
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fx/cfxohqtslc3dqrt5q76xjfzv42xcoegpnf4bk2xzfywnxhulltkz.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 54
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cn/ccnioxnprzdhqp7h46cm43h2txm5r7rjkic5tjmaa75hwkg5st3l.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_6 => tanh
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_tanh_7 = async_compile.triton('triton_poi_fused_tanh_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (520, 2704), (2704, 1))
assert_size_stride(primals_7, (520, ), (1, ))
assert_size_stride(primals_8, (108, 520), (520, 1))
assert_size_stride(primals_9, (108, ), (1, ))
assert_size_stride(primals_10, (54, 108), (108, 1))
assert_size_stride(primals_11, (54, ), (1, ))
assert_size_stride(primals_12, (4, 54), (54, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 60, 60), (21600, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 6, 60, 60), (21696, 3616, 60, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 86400, grid=grid(86400), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 30, 30), (5408, 900, 30, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 30, 30), (5504, 900, 30, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 21600, grid=grid(21600), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 26, 26), (10816, 676, 26, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 43264, grid=grid(43264), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 13, 13), (2816, 169, 13, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 13, 13), (2720, 169, 13, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 10816, grid=grid(10816), stream=stream0)
buf8 = empty_strided_cuda((4, 520), (520, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 2704), (2720, 1), 0), reinterpret_tensor(primals_6, (2704, 520), (1, 2704), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_7, 2080, grid=grid(2080), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 108), (108, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (520, 108), (1, 520), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 432, grid=grid(432), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 54), (54, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (108, 54), (1, 108), 0), out=buf12)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf13, primals_11, 216, grid=grid(216), stream=stream0)
del primals_11
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf13, reinterpret_tensor(primals_12, (54, 4), (1, 54), 0), out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.tanh]
triton_poi_fused_tanh_7.run(buf15, primals_13, 16, grid=grid(16), stream=stream0)
del primals_13
return (buf15, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 2704), (2720, 1), 0), buf9, buf11, buf13, buf15, primals_12, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((520, 2704), (2704, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((520, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((108, 520), (520, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((108, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((54, 108), (108, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((54, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 54), (54, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, act_dim):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(2704, 520)
self.fc2 = nn.Linear(520, 108)
self.fc3 = nn.Linear(108, 54)
self.fc4 = nn.Linear(54, act_dim)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1)
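        # x is now (N, 2704): 16 channels x 13 x 13 after two conv(5x5) +
        # pool(2x2) stages on the 64x64 input.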
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = torch.tanh(self.fc4(x))
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'act_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
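    # Fused conv1 bias-add + ReLU over 4*6*60*60 = 86400 elements; note the
    # padded per-channel output stride of 3616 (dense would be 60*60 = 3600),
    # presumably chosen for memory alignment.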
xnumel = 86400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 6
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
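    # 2x2 stride-2 max pool: out_ptr0 receives the pooled values, out_ptr1 the
    # argmax position (0-3) encoded as int8 and saved for the backward pass.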
xnumel = 21600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x4 = xindex // 900
x3 = xindex // 5400
x5 = xindex % 5400
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 5408 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 5504 * x3), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 10816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 13
x3 = xindex // 13
x2 = xindex // 2704
x4 = xindex % 2704
tmp0 = tl.load(in_ptr0 + (2 * x0 + 52 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 52 * x3), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (26 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (27 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x4 + 2816 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x4 + 2720 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
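    # Fused bias-add + ReLU for the fc1 output (4 x 520); relu_5 and relu_6
    # below are the same pattern for fc2 and fc3.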
xnumel = 2080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 520
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 108
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 54
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_tanh_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (520, 2704), (2704, 1))
assert_size_stride(primals_7, (520,), (1,))
assert_size_stride(primals_8, (108, 520), (520, 1))
assert_size_stride(primals_9, (108,), (1,))
assert_size_stride(primals_10, (54, 108), (108, 1))
assert_size_stride(primals_11, (54,), (1,))
assert_size_stride(primals_12, (4, 54), (54, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 60, 60), (21600, 3600, 60, 1))
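        # conv1 is 5x5 with no padding, so 64 - 5 + 1 = 60 per spatial dim.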
buf1 = empty_strided_cuda((4, 6, 60, 60), (21696, 3616, 60, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(86400)](buf0, primals_2,
buf1, 86400, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 30, 30), (5408, 900, 30, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 30, 30), (5504, 900, 30, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(21600)](buf1, buf2,
buf3, 21600, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 26, 26), (10816, 676, 26, 1))
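        # conv2 on the pooled 30x30 maps: 30 - 5 + 1 = 26 per spatial dim.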
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(43264)](buf5, primals_5,
43264, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 13, 13), (2816, 169, 13, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 16, 13, 13), (2720, 169, 13, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(10816)](buf5, buf6,
buf7, 10816, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 520), (520, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 2704), (2720, 1), 0),
reinterpret_tensor(primals_6, (2704, 520), (1, 2704), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(2080)](buf9, primals_7, 2080, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 108), (108, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (520, 108), (
1, 520), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(432)](buf11, primals_9, 432, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 54), (54, 1), torch.float32)
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (108, 54),
(1, 108), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(216)](buf13, primals_11, 216, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf13, reinterpret_tensor(primals_12, (54, 4), (1,
54), 0), out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_tanh_7[grid(16)](buf15, primals_13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_13
return (buf15, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 2704), (2720, 1), 0), buf9,
buf11, buf13, buf15, primals_12, primals_10, primals_8, primals_6)
class ModelNew(nn.Module):
def __init__(self, act_dim):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(2704, 520)
self.fc2 = nn.Linear(520, 108)
self.fc3 = nn.Linear(108, 54)
self.fc4 = nn.Linear(54, act_dim)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_12 = self.fc4.weight
primals_13 = self.fc4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| krishanrana/robot_learning_algorithms | Model | false | 3,871 | [
"MIT"
] | 0 | 3e66c9bf44e81ff281195130c71bcc6ebdf5ccda | https://github.com/krishanrana/robot_learning_algorithms/tree/3e66c9bf44e81ff281195130c71bcc6ebdf5ccda | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, act_dim):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(2704, 520)
self.fc2 = nn.Linear(520, 108)
self.fc3 = nn.Linear(108, 54)
self.fc4 = nn.Linear(54, act_dim)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = torch.tanh(self.fc4(x))
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [4]
|
SPP | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/vi/cvih4u25oo4wxubp2b2ew7b4rm3k7js5c5bl6pzxtrnzdkaxor7e.py
# Topologically Sorted Source Nodes: [x_1, x], Original ATen: [aten.max_pool2d_with_indices, aten.cat]
# Source node to ATen node mapping:
# x => cat
# x_1 => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [5, 5], [1, 1], [2, 2], [1, 1], False), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %getitem, %getitem_2, %getitem_4], 1), kwargs = {})
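# Eager-mode sketch of the fused op. Only the 5x5 pool appears explicitly
# above; the kernel sizes of the other two branches (getitem_2, getitem_4) are
# an assumption based on the usual SPP configuration:
#   x = torch.cat([x, F.max_pool2d(x, 5, 1, 2), F.max_pool2d(x, 9, 1, 4),
#                  F.max_pool2d(x, 13, 1, 6)], dim=1)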
triton_poi_fused_cat_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_cat_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 26, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
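    # Fully unrolled 5x5 max pool (stride 1, padding 2) over the 4x4 map: all
    # 25 taps are bounds-checked, with out-of-range taps contributing -inf.
    # tmp116 additionally copies the raw input through to out_ptr1 for the
    # identity branch of the cat.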
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x7 = xindex
x3 = (xindex // 64)
x4 = xindex % 64
tmp116 = tl.load(in_ptr0 + (x7), xmask)
tmp0 = (-2) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-2) + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-10) + x7), tmp10 & xmask, other=float("-inf"))
tmp12 = (-1) + x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-9) + x7), tmp16 & xmask, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-8) + x7), tmp23 & xmask, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 1 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + ((-7) + x7), tmp30 & xmask, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = 2 + x0
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp5 & tmp36
tmp38 = tl.load(in_ptr0 + ((-6) + x7), tmp37 & xmask, other=float("-inf"))
tmp39 = triton_helpers.maximum(tmp38, tmp32)
tmp40 = (-1) + x1
tmp41 = tmp40 >= tmp1
tmp42 = tmp40 < tmp3
tmp43 = tmp41 & tmp42
tmp44 = tmp43 & tmp9
tmp45 = tl.load(in_ptr0 + ((-6) + x7), tmp44 & xmask, other=float("-inf"))
tmp46 = triton_helpers.maximum(tmp45, tmp39)
tmp47 = tmp43 & tmp15
tmp48 = tl.load(in_ptr0 + ((-5) + x7), tmp47 & xmask, other=float("-inf"))
tmp49 = triton_helpers.maximum(tmp48, tmp46)
tmp50 = tmp43 & tmp22
tmp51 = tl.load(in_ptr0 + ((-4) + x7), tmp50 & xmask, other=float("-inf"))
tmp52 = triton_helpers.maximum(tmp51, tmp49)
tmp53 = tmp43 & tmp29
tmp54 = tl.load(in_ptr0 + ((-3) + x7), tmp53 & xmask, other=float("-inf"))
tmp55 = triton_helpers.maximum(tmp54, tmp52)
tmp56 = tmp43 & tmp36
tmp57 = tl.load(in_ptr0 + ((-2) + x7), tmp56 & xmask, other=float("-inf"))
tmp58 = triton_helpers.maximum(tmp57, tmp55)
tmp59 = x1
tmp60 = tmp59 >= tmp1
tmp61 = tmp59 < tmp3
tmp62 = tmp60 & tmp61
tmp63 = tmp62 & tmp9
tmp64 = tl.load(in_ptr0 + ((-2) + x7), tmp63 & xmask, other=float("-inf"))
tmp65 = triton_helpers.maximum(tmp64, tmp58)
tmp66 = tmp62 & tmp15
tmp67 = tl.load(in_ptr0 + ((-1) + x7), tmp66 & xmask, other=float("-inf"))
tmp68 = triton_helpers.maximum(tmp67, tmp65)
tmp69 = tmp62 & tmp22
tmp70 = tl.load(in_ptr0 + (x7), tmp69 & xmask, other=float("-inf"))
tmp71 = triton_helpers.maximum(tmp70, tmp68)
tmp72 = tmp62 & tmp29
tmp73 = tl.load(in_ptr0 + (1 + x7), tmp72 & xmask, other=float("-inf"))
tmp74 = triton_helpers.maximum(tmp73, tmp71)
tmp75 = tmp62 & tmp36
tmp76 = tl.load(in_ptr0 + (2 + x7), tmp75 & xmask, other=float("-inf"))
tmp77 = triton_helpers.maximum(tmp76, tmp74)
tmp78 = 1 + x1
tmp79 = tmp78 >= tmp1
tmp80 = tmp78 < tmp3
tmp81 = tmp79 & tmp80
tmp82 = tmp81 & tmp9
tmp83 = tl.load(in_ptr0 + (2 + x7), tmp82 & xmask, other=float("-inf"))
tmp84 = triton_helpers.maximum(tmp83, tmp77)
tmp85 = tmp81 & tmp15
tmp86 = tl.load(in_ptr0 + (3 + x7), tmp85 & xmask, other=float("-inf"))
tmp87 = triton_helpers.maximum(tmp86, tmp84)
tmp88 = tmp81 & tmp22
tmp89 = tl.load(in_ptr0 + (4 + x7), tmp88 & xmask, other=float("-inf"))
tmp90 = triton_helpers.maximum(tmp89, tmp87)
tmp91 = tmp81 & tmp29
tmp92 = tl.load(in_ptr0 + (5 + x7), tmp91 & xmask, other=float("-inf"))
tmp93 = triton_helpers.maximum(tmp92, tmp90)
tmp94 = tmp81 & tmp36
tmp95 = tl.load(in_ptr0 + (6 + x7), tmp94 & xmask, other=float("-inf"))
tmp96 = triton_helpers.maximum(tmp95, tmp93)
tmp97 = 2 + x1
tmp98 = tmp97 >= tmp1
tmp99 = tmp97 < tmp3
tmp100 = tmp98 & tmp99
tmp101 = tmp100 & tmp9
tmp102 = tl.load(in_ptr0 + (6 + x7), tmp101 & xmask, other=float("-inf"))
tmp103 = triton_helpers.maximum(tmp102, tmp96)
tmp104 = tmp100 & tmp15
tmp105 = tl.load(in_ptr0 + (7 + x7), tmp104 & xmask, other=float("-inf"))
tmp106 = triton_helpers.maximum(tmp105, tmp103)
tmp107 = tmp100 & tmp22
tmp108 = tl.load(in_ptr0 + (8 + x7), tmp107 & xmask, other=float("-inf"))
tmp109 = triton_helpers.maximum(tmp108, tmp106)
tmp110 = tmp100 & tmp29
tmp111 = tl.load(in_ptr0 + (9 + x7), tmp110 & xmask, other=float("-inf"))
tmp112 = triton_helpers.maximum(tmp111, tmp109)
tmp113 = tmp100 & tmp36
tmp114 = tl.load(in_ptr0 + (10 + x7), tmp113 & xmask, other=float("-inf"))
tmp115 = triton_helpers.maximum(tmp114, tmp112)
tl.store(out_ptr0 + (x4 + (256*x3)), tmp115, xmask)
tl.store(out_ptr1 + (x4 + (256*x3)), tmp116, xmask)
''', device_str='cuda')
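# Annotation (added, not emitted by Inductor): the unrolled 25-tap maximum
# above, with out-of-bounds taps masked to -inf, is exactly a 5x5 stride-1
# padding-2 max pool. A minimal eager-mode equivalence sketch on CPU:
def _check_fused_maxpool_reference():
    import torch.nn.functional as F
    x = torch.randn(4, 4, 4, 4)
    # Pad with -inf, then take the max over every 5x5 window, mirroring
    # the boundary masks (tmp2..tmp36) and the running maximum above.
    xp = F.pad(x, (2, 2, 2, 2), value=float('-inf'))
    manual = xp.unfold(2, 5, 1).unfold(3, 5, 1).amax(dim=(-2, -1))
    assert torch.equal(manual, F.max_pool2d(x, 5, stride=1, padding=2))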
# kernel path: runs/run_shard_7/inductor_cache/wu/cwuiwj6jpv44elf77w3mkg3fk7wv6xwrv2wzcyipfdadqsgr6dzt.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %getitem, %getitem_2, %getitem_4], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (256*x1)), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
buf0 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 64) # alias
buf7 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 0) # alias
# Topologically Sorted Source Nodes: [x_1, x], Original ATen: [aten.max_pool2d_with_indices, aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_max_pool2d_with_indices_0.run(arg0_1, buf0, buf7, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
buf1 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [9, 9], [1, 1], [4, 4])
buf2 = buf1[0]
del buf1
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
buf4 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [13, 13], [1, 1], [6, 6])
del arg0_1
buf5 = buf4[0]
del buf4
buf8 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 128) # alias
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf2, buf8, 256, grid=grid(256), stream=stream0)
del buf2
buf9 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 192) # alias
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf5, buf9, 256, grid=grid(256), stream=stream0)
del buf5
return (buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
class SPP(nn.Module):
"""
    Spatial Pyramid Pooling: concatenate the input with 5x5, 9x9 and 13x13
    stride-1 max-pooled copies along the channel dimension, preserving the
    spatial size and quadrupling the channel count.
"""
def __init__(self):
super(SPP, self).__init__()
def forward(self, x):
x_1 = torch.nn.functional.max_pool2d(x, 5, stride=1, padding=2)
x_2 = torch.nn.functional.max_pool2d(x, 9, stride=1, padding=4)
x_3 = torch.nn.functional.max_pool2d(x, 13, stride=1, padding=6)
x = torch.cat([x, x_1, x_2, x_3], dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
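# Usage sketch (added, shapes chosen for illustration): stride-1 pooling with
# kernel k and padding k // 2 keeps H and W, so the dim=1 concatenation
# quadruples the channels.
def _spp_shape_check():
    y = SPP()(torch.rand(2, 3, 8, 8))
    assert y.shape == (2, 12, 8, 8)  # 3 channels -> 4 * 3 = 12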
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_max_pool2d_with_indices_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x7 = xindex
x3 = xindex // 64
x4 = xindex % 64
tmp116 = tl.load(in_ptr0 + x7, xmask)
tmp0 = -2 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -2 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-10 + x7), tmp10 & xmask, other=float('-inf'))
tmp12 = -1 + x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-9 + x7), tmp16 & xmask, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-8 + x7), tmp23 & xmask, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 1 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + (-7 + x7), tmp30 & xmask, other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = 2 + x0
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp5 & tmp36
tmp38 = tl.load(in_ptr0 + (-6 + x7), tmp37 & xmask, other=float('-inf'))
tmp39 = triton_helpers.maximum(tmp38, tmp32)
tmp40 = -1 + x1
tmp41 = tmp40 >= tmp1
tmp42 = tmp40 < tmp3
tmp43 = tmp41 & tmp42
tmp44 = tmp43 & tmp9
tmp45 = tl.load(in_ptr0 + (-6 + x7), tmp44 & xmask, other=float('-inf'))
tmp46 = triton_helpers.maximum(tmp45, tmp39)
tmp47 = tmp43 & tmp15
tmp48 = tl.load(in_ptr0 + (-5 + x7), tmp47 & xmask, other=float('-inf'))
tmp49 = triton_helpers.maximum(tmp48, tmp46)
tmp50 = tmp43 & tmp22
tmp51 = tl.load(in_ptr0 + (-4 + x7), tmp50 & xmask, other=float('-inf'))
tmp52 = triton_helpers.maximum(tmp51, tmp49)
tmp53 = tmp43 & tmp29
tmp54 = tl.load(in_ptr0 + (-3 + x7), tmp53 & xmask, other=float('-inf'))
tmp55 = triton_helpers.maximum(tmp54, tmp52)
tmp56 = tmp43 & tmp36
tmp57 = tl.load(in_ptr0 + (-2 + x7), tmp56 & xmask, other=float('-inf'))
tmp58 = triton_helpers.maximum(tmp57, tmp55)
tmp59 = x1
tmp60 = tmp59 >= tmp1
tmp61 = tmp59 < tmp3
tmp62 = tmp60 & tmp61
tmp63 = tmp62 & tmp9
tmp64 = tl.load(in_ptr0 + (-2 + x7), tmp63 & xmask, other=float('-inf'))
tmp65 = triton_helpers.maximum(tmp64, tmp58)
tmp66 = tmp62 & tmp15
tmp67 = tl.load(in_ptr0 + (-1 + x7), tmp66 & xmask, other=float('-inf'))
tmp68 = triton_helpers.maximum(tmp67, tmp65)
tmp69 = tmp62 & tmp22
tmp70 = tl.load(in_ptr0 + x7, tmp69 & xmask, other=float('-inf'))
tmp71 = triton_helpers.maximum(tmp70, tmp68)
tmp72 = tmp62 & tmp29
tmp73 = tl.load(in_ptr0 + (1 + x7), tmp72 & xmask, other=float('-inf'))
tmp74 = triton_helpers.maximum(tmp73, tmp71)
tmp75 = tmp62 & tmp36
tmp76 = tl.load(in_ptr0 + (2 + x7), tmp75 & xmask, other=float('-inf'))
tmp77 = triton_helpers.maximum(tmp76, tmp74)
tmp78 = 1 + x1
tmp79 = tmp78 >= tmp1
tmp80 = tmp78 < tmp3
tmp81 = tmp79 & tmp80
tmp82 = tmp81 & tmp9
tmp83 = tl.load(in_ptr0 + (2 + x7), tmp82 & xmask, other=float('-inf'))
tmp84 = triton_helpers.maximum(tmp83, tmp77)
tmp85 = tmp81 & tmp15
tmp86 = tl.load(in_ptr0 + (3 + x7), tmp85 & xmask, other=float('-inf'))
tmp87 = triton_helpers.maximum(tmp86, tmp84)
tmp88 = tmp81 & tmp22
tmp89 = tl.load(in_ptr0 + (4 + x7), tmp88 & xmask, other=float('-inf'))
tmp90 = triton_helpers.maximum(tmp89, tmp87)
tmp91 = tmp81 & tmp29
tmp92 = tl.load(in_ptr0 + (5 + x7), tmp91 & xmask, other=float('-inf'))
tmp93 = triton_helpers.maximum(tmp92, tmp90)
tmp94 = tmp81 & tmp36
tmp95 = tl.load(in_ptr0 + (6 + x7), tmp94 & xmask, other=float('-inf'))
tmp96 = triton_helpers.maximum(tmp95, tmp93)
tmp97 = 2 + x1
tmp98 = tmp97 >= tmp1
tmp99 = tmp97 < tmp3
tmp100 = tmp98 & tmp99
tmp101 = tmp100 & tmp9
tmp102 = tl.load(in_ptr0 + (6 + x7), tmp101 & xmask, other=float('-inf'))
tmp103 = triton_helpers.maximum(tmp102, tmp96)
tmp104 = tmp100 & tmp15
tmp105 = tl.load(in_ptr0 + (7 + x7), tmp104 & xmask, other=float('-inf'))
tmp106 = triton_helpers.maximum(tmp105, tmp103)
tmp107 = tmp100 & tmp22
tmp108 = tl.load(in_ptr0 + (8 + x7), tmp107 & xmask, other=float('-inf'))
tmp109 = triton_helpers.maximum(tmp108, tmp106)
tmp110 = tmp100 & tmp29
tmp111 = tl.load(in_ptr0 + (9 + x7), tmp110 & xmask, other=float('-inf'))
tmp112 = triton_helpers.maximum(tmp111, tmp109)
tmp113 = tmp100 & tmp36
tmp114 = tl.load(in_ptr0 + (10 + x7), tmp113 & xmask, other=float('-inf'))
tmp115 = triton_helpers.maximum(tmp114, tmp112)
tl.store(out_ptr0 + (x4 + 256 * x3), tmp115, xmask)
tl.store(out_ptr1 + (x4 + 256 * x3), tmp116, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 256 * x1), tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
buf0 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 64)
buf7 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 0)
get_raw_stream(0)
triton_poi_fused_cat_max_pool2d_with_indices_0[grid(256)](arg0_1,
buf0, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [9, 9], [1, 1], [4, 4])
buf2 = buf1[0]
del buf1
        buf4 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [13, 13], [1, 1], [6, 6])
del arg0_1
buf5 = buf4[0]
del buf4
buf8 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 128)
triton_poi_fused_cat_1[grid(256)](buf2, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf2
buf9 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 192)
triton_poi_fused_cat_1[grid(256)](buf5, buf9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
return buf10,
class SPPNew(nn.Module):
"""
Spatial Pyramid Pooling
"""
def __init__(self):
super(SPPNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ldylab/learning_yolo_family_with_pytorch | SPP | false | 3,872 | [
"MIT"
] | 0 | 63fd8d65e5ccd55c9ec124052bbcb040e0d9c549 | https://github.com/ldylab/learning_yolo_family_with_pytorch/tree/63fd8d65e5ccd55c9ec124052bbcb040e0d9c549 | import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
class Model(nn.Module):
"""
Spatial Pyramid Pooling
"""
def __init__(self):
super().__init__()
def forward(self, x):
x_1 = torch.nn.functional.max_pool2d(x, 5, stride=1, padding=2)
x_2 = torch.nn.functional.max_pool2d(x, 9, stride=1, padding=4)
x_3 = torch.nn.functional.max_pool2d(x, 13, stride=1, padding=6)
x = torch.cat([x, x_1, x_2, x_3], dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
_DynamicGates | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5l/c5l5ekrfmgr3qnr4agpwzt4jg7o426phqlvb4imwrtbcvhsn5ct2.py
# Topologically Sorted Source Nodes: [add, gates], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# gates => add_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_5), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
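# Annotation (added, not emitted by Inductor): the kernel fuses the two adds
# of "mm1 + mm2 + bias" in place, broadcasting the 12-element bias over all
# 64 rows via x0 = xindex % 12. Eager equivalent:
def _fused_add_reference(a, b, bias):
    # a, b: (64, 12) matmul outputs; bias: (12,), broadcast across rows.
    return a + b + bias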
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 12), (12, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 12), (12, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, gates], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf2, buf1, primals_5, 768, grid=grid(768), stream=stream0)
del buf1
del primals_5
return (buf2, reinterpret_tensor(primals_4, (4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class _DynamicGates(nn.Module):
"""Internal class to wrap the dynamic gate parameters into a dedicated PyTorch Module"""
def __init__(self, cfg: 'Config', input_size: 'int'):
super(_DynamicGates, self).__init__()
self.cfg = cfg
        self.weight_ih = nn.Parameter(torch.FloatTensor(
            input_size, 3 * cfg.hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(
            cfg.hidden_size, 3 * cfg.hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(3 * cfg.hidden_size))
self._reset_parameters()
def _reset_parameters(self):
"""Special initialization of certain model weights."""
nn.init.orthogonal_(self.weight_ih.data)
weight_hh_data = torch.eye(self.cfg.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
if self.cfg.initial_forget_bias is not None:
            self.bias.data[:self.cfg.hidden_size] = self.cfg.initial_forget_bias
def forward(self, h: 'torch.Tensor', x_d: 'torch.Tensor'):
gates = h @ self.weight_hh + x_d @ self.weight_ih + self.bias
return gates
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'cfg': _mock_config(hidden_size=4, initial_forget_bias=4),
'input_size': 4}]
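# Shape sketch (added, not from the repo): the fused projection emits
# 3 * hidden_size features per step; the bias slicing in _reset_parameters
# suggests the first chunk is the forget gate, so a conventional split is:
def _gates_shape_check():
    cfg = _mock_config(hidden_size=4, initial_forget_bias=4)
    gates = _DynamicGates(cfg, input_size=4)(torch.rand(2, 4), torch.rand(2, 4))
    assert gates.shape == (2, 12)  # (batch, 3 * hidden_size)
    f, i, o = gates.chunk(3, dim=-1)  # gate order assumed; verify upstream
    assert f.shape == (2, 4)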
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 12), (12, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 12), (12, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (12,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(768)](buf2, buf1, primals_5, 768,
XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_5
return buf2, reinterpret_tensor(primals_4, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
class _DynamicGatesNew(nn.Module):
"""Internal class to wrap the dynamic gate parameters into a dedicated PyTorch Module"""
def __init__(self, cfg: 'Config', input_size: 'int'):
super(_DynamicGatesNew, self).__init__()
self.cfg = cfg
        self.weight_ih = nn.Parameter(torch.FloatTensor(
            input_size, 3 * cfg.hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(
            cfg.hidden_size, 3 * cfg.hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(3 * cfg.hidden_size))
self._reset_parameters()
def _reset_parameters(self):
"""Special initialization of certain model weights."""
nn.init.orthogonal_(self.weight_ih.data)
weight_hh_data = torch.eye(self.cfg.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
if self.cfg.initial_forget_bias is not None:
            self.bias.data[:self.cfg.hidden_size] = self.cfg.initial_forget_bias
def forward(self, input_0, input_1):
primals_1 = self.weight_ih
primals_3 = self.weight_hh
primals_5 = self.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| kyleniemeyer/neuralhydrology | _DynamicGates | false | 3,873 | [
"BSD-3-Clause"
] | 0 | 440fda715c4f746a2d56b058b9af2f0e03c36aa0 | https://github.com/kyleniemeyer/neuralhydrology/tree/440fda715c4f746a2d56b058b9af2f0e03c36aa0 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Model(nn.Module):
"""Internal class to wrap the dynamic gate parameters into a dedicated PyTorch Module"""
def __init__(self, cfg: 'Config', input_size: 'int'):
super().__init__()
self.cfg = cfg
        self.weight_ih = nn.Parameter(torch.FloatTensor(
            input_size, 3 * cfg.hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(
            cfg.hidden_size, 3 * cfg.hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(3 * cfg.hidden_size))
self._reset_parameters()
def _reset_parameters(self):
"""Special initialization of certain model weights."""
nn.init.orthogonal_(self.weight_ih.data)
weight_hh_data = torch.eye(self.cfg.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
if self.cfg.initial_forget_bias is not None:
            self.bias.data[:self.cfg.hidden_size] = self.cfg.initial_forget_bias
def forward(self, h: 'torch.Tensor', x_d: 'torch.Tensor'):
gates = h @ self.weight_hh + x_d @ self.weight_ih + self.bias
return gates
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'cfg': _mock_config(hidden_size=4, initial_forget_bias=4),
'input_size': 4}]
|
LIN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/um/cumfxxnuff67646tii7qigo5xu4wp3dwgamqagptzcoe6havnpgp.py
# Topologically Sorted Source Nodes: [ln_mean, ln_var, add_1, sqrt_1], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt]
# Source node to ATen node mapping:
# add_1 => add_1
# ln_mean => mean_1
# ln_var => var_1
# sqrt_1 => sqrt_1
# Graph fragment:
# %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1, 2, 3], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 1, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var_1, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
triton_per_fused_add_mean_sqrt_var_0 = async_compile.triton('triton_per_fused_add_mean_sqrt_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_sqrt_var_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
''', device_str='cuda')
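# Annotation (added, not emitted by Inductor): the reduction above computes
# per-sample layer stats over all 64 elements -- mean = sum / 64 and an
# unbiased standard deviation (squared deviations / 63, plus eps). A minimal
# eager sketch of the same two outputs:
def _ln_stats_reference(x):
    mu = x.mean(dim=[1, 2, 3], keepdim=True)
    sd = torch.sqrt(x.var(dim=[1, 2, 3], keepdim=True) + 1e-05)  # var / 63
    return mu, sd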
# kernel path: runs/run_shard_7/inductor_cache/5i/c5in4qum3v6uczba2ipqijwx2a55yoloa7pndazn53hbcswn6ktk.py
# Topologically Sorted Source Nodes: [in_mean, in_var, sub, add, sqrt, out_in, sub_1, out_ln, mul, sub_2, mul_1, out, mul_2, out_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# add => add
# in_mean => mean
# in_var => var
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# out => add_2
# out_1 => add_3
# out_in => div
# out_ln => div_1
# sqrt => sqrt
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2, 3], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [2, 3]), kwargs = {correction: 1, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %expand), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %div_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %expand_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %expand_3), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp30 = 1.0
tmp31 = tmp30 - tmp26
tmp33 = tmp0 - tmp32
tmp35 = tmp33 / tmp34
tmp36 = tmp31 * tmp35
tmp37 = tmp29 + tmp36
tmp39 = tmp37 * tmp38
tmp41 = tmp39 + tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp41, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf6 # reuse
buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [ln_mean, ln_var, add_1, sqrt_1], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0.run(buf7, buf11, primals_1, 4, 64, grid=grid(4), stream=stream0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf3 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [in_mean, in_var, sub, add, sqrt, out_in, sub_1, out_ln, mul, sub_2, mul_1, out, mul_2, out_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul, aten.rsub]
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1.run(buf1, buf5, primals_1, primals_2, buf7, buf11, primals_3, primals_4, buf12, 16, 16, grid=grid(16), stream=stream0)
del primals_4
return (buf12, primals_1, primals_2, primals_3, buf1, buf5, buf7, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class LIN(nn.Module):
def __init__(self, num_features, eps=1e-05):
super(LIN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))
self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.0)
self.gamma.data.fill_(1.0)
self.beta.data.fill_(0.0)
def forward(self, input):
        in_mean = torch.mean(input, dim=[2, 3], keepdim=True)
        in_var = torch.var(input, dim=[2, 3], keepdim=True)
        out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
        ln_mean = torch.mean(input, dim=[1, 2, 3], keepdim=True)
        ln_var = torch.var(input, dim=[1, 2, 3], keepdim=True)
        out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
        rho = self.rho.expand(input.shape[0], -1, -1, -1)
        out = rho * out_in + (1 - rho) * out_ln
        out = (out * self.gamma.expand(input.shape[0], -1, -1, -1)
               + self.beta.expand(input.shape[0], -1, -1, -1))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
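# Numerical sketch (added): with the default init (rho=0, gamma=1, beta=0)
# LIN reduces to its layer-norm branch, computed with torch.var's default
# *unbiased* variance -- hence the 63.0 / 15.0 divisors in the fused
# kernels of the compiled version below.
def _lin_layernorm_check():
    lin = LIN(num_features=4)
    x = torch.rand(4, 4, 4, 4)
    mu = x.mean(dim=[1, 2, 3], keepdim=True)
    var = x.var(dim=[1, 2, 3], keepdim=True)  # unbiased: divides by 63
    ref = (x - mu) / torch.sqrt(var + 1e-05)
    assert torch.allclose(lin(x), ref, atol=1e-06)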
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp30 = 1.0
tmp31 = tmp30 - tmp26
tmp33 = tmp0 - tmp32
tmp35 = tmp33 / tmp34
tmp36 = tmp31 * tmp35
tmp37 = tmp29 + tmp36
tmp39 = tmp37 * tmp38
tmp41 = tmp39 + tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp41, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf6
buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0[grid(4)](buf7, buf11,
primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf3
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1[grid(16)](buf1,
buf5, primals_1, primals_2, buf7, buf11, primals_3, primals_4,
buf12, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_4
return buf12, primals_1, primals_2, primals_3, buf1, buf5, buf7, buf11
class LINNew(nn.Module):
def __init__(self, num_features, eps=1e-05):
super(LINNew, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))
self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.0)
self.gamma.data.fill_(1.0)
self.beta.data.fill_(0.0)
def forward(self, input_0):
primals_2 = self.rho
primals_3 = self.gamma
primals_4 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| ldzhangyu/photo2cartoon | LIN | false | 3,874 | [
"MIT"
] | 0 | d5b371e77e61018c28109db67e8306e5e6064800 | https://github.com/ldzhangyu/photo2cartoon/tree/d5b371e77e61018c28109db67e8306e5e6064800 | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, num_features, eps=1e-05):
super().__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))
self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.0)
self.gamma.data.fill_(1.0)
self.beta.data.fill_(0.0)
def forward(self, input):
        in_mean = torch.mean(input, dim=[2, 3], keepdim=True)
        in_var = torch.var(input, dim=[2, 3], keepdim=True)
        out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
        ln_mean = torch.mean(input, dim=[1, 2, 3], keepdim=True)
        ln_var = torch.var(input, dim=[1, 2, 3], keepdim=True)
        out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
        rho = self.rho.expand(input.shape[0], -1, -1, -1)
        out = rho * out_in + (1 - rho) * out_ln
        out = (out * self.gamma.expand(input.shape[0], -1, -1, -1)
               + self.beta.expand(input.shape[0], -1, -1, -1))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
GlobalAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# align_vectors => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# align_vectors => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
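# Annotation (added, not emitted by Inductor): together the two kernels
# above form the classic two-pass numerically stable softmax over
# contiguous length-4 rows -- pass 0 writes exp(x - rowmax), pass 1
# divides by the row sum. Eager equivalent:
def _softmax_reference(x):
    e = (x - x.max(dim=-1, keepdim=True).values).exp()
    return e / e.sum(dim=-1, keepdim=True)  # == torch.softmax(x, dim=-1)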
# kernel path: runs/run_shard_7/inductor_cache/ip/cip3p4ibqio6uu76ccsemd7wjusq5ptlow3dt2zxzouyuz2sqywf.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %primals_1], 2), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
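# Added illustration: the cat kernel above is the flattened index arithmetic
# for torch.cat([c, input], 2) on two (4, 4, 4) tensors -- lanes with
# x0 < 4 read the first operand, the rest read the second. Eager equivalent:
def _reference_cat_last_dim(c, inp):
    return torch.cat([c, inp], dim=2)  # -> (4, 4, 8)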
# kernel path: runs/run_shard_7/inductor_cache/f5/cf5pnuv5il7avsmzck3quom7r6zvcfuulsdwpzlv2epzfmcgqgwb.py
# Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_h_2 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + (x3), tmp1, xmask)
''', device_str='cuda')
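# Added illustration: clone_3 fuses tanh with the (batch, tgt, dim) ->
# (tgt, batch, dim) permute + contiguous copy; the load index
# x0 + 4*x2 + 16*x1 swaps the two leading dims. Eager equivalent for the
# (16, 4) linear output:
def _reference_tanh_transpose(buf5):
    return torch.tanh(buf5.view(4, 4, 4)).transpose(0, 1).contiguous()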
# kernel path: runs/run_shard_7/inductor_cache/u4/cu4fypgfipklcxtitafatnyqdaatx5tws6qfndqotcy4qivcph6d.py
# Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# align_vectors_2 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [align], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [c], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf3, primals_1, buf4, 128, grid=grid(128), stream=stream0)
del primals_1
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf2, buf7, 64, grid=grid(64), stream=stream0)
del buf2
return (buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5, )
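# Added note: buf6 and buf7 above are the module outputs (attn_h and
# align_vectors, both in tgt-major layout); the remaining returned tensors
# are kept only as saved activations for the backward pass.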
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.cuda
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class GlobalAttention(nn.Module):
"""
Luong Attention.
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\\ | | /
.....
\\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
Luong Attention (dot, general):
The full function is
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.
* dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$
* general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$
Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength} a_j h_j$$.
The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j))$$.
"""
def __init__(self, dim, coverage=False, attn_type='dot'):
super(GlobalAttention, self).__init__()
self.dim = dim
self.attn_type = attn_type
assert self.attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        self.sm = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def score(self, h_t, h_s):
"""
h_t (FloatTensor): batch x tgt_len x dim
h_s (FloatTensor): batch x src_len x dim
returns scores (FloatTensor): batch x tgt_len x src_len:
raw attention scores for each src index
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input, context, context_lengths=None, coverage=None):
"""
input (FloatTensor): batch x tgt_len x dim: decoder's rnn's output.
context (FloatTensor): batch x src_len x dim: src hidden states
context_lengths (LongTensor): the source context lengths.
coverage (FloatTensor): None (not supported yet)
"""
if input.dim() == 2:
one_step = True
input = input.unsqueeze(1)
else:
one_step = False
batch, sourceL, dim = context.size()
batch_, targetL, dim_ = input.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
batch_, sourceL_ = coverage.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
context += self.linear_cover(cover).view_as(context)
context = self.tanh(context)
align = self.score(input, context)
if context_lengths is not None:
mask = sequence_mask(context_lengths)
mask = mask.unsqueeze(1)
            align.data.masked_fill_(~mask, -float('inf'))
align_vectors = self.sm(align.view(batch * targetL, sourceL))
align_vectors = align_vectors.view(batch, targetL, sourceL)
c = torch.bmm(align_vectors, context)
concat_c = torch.cat([c, input], 2).view(batch * targetL, dim * 2)
attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
if self.attn_type in ['general', 'dot']:
attn_h = self.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
batch_, sourceL_ = align_vectors.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
targetL_, batch_, dim_ = attn_h.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(dim, dim_)
targetL_, batch_, sourceL_ = align_vectors.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(sourceL, sourceL_)
return attn_h, align_vectors
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
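# Added usage sketch (illustrative, not from the original repo): a quick
# eager-mode smoke test wiring the helpers above into the dot attention.
def _smoke_test_global_attention():
    args, kwargs = get_init_inputs()
    attn = GlobalAttention(*args, **kwargs)
    attn_h, align_vectors = attn(*get_inputs())
    print(attn_h.shape, align_vectors.shape)  # both (4, 4, 4) here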
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x3, tmp1, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4,
4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
del buf3
extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class GlobalAttentionNew(nn.Module):
"""
Luong Attention.
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\\ | | /
.....
\\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
Luong Attention (dot, general):
The full function is
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.
* dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$
* general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$
Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength} a_j h_j$$.
The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j))$$.
"""
def __init__(self, dim, coverage=False, attn_type='dot'):
super(GlobalAttentionNew, self).__init__()
self.dim = dim
self.attn_type = attn_type
assert self.attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        self.sm = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def score(self, h_t, h_s):
"""
h_t (FloatTensor): batch x tgt_len x dim
h_s (FloatTensor): batch x src_len x dim
returns scores (FloatTensor): batch x tgt_len x src_len:
raw attention scores for each src index
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input_0, input_1):
primals_3 = self.linear_out.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
| kurianbenoy/QG-Net | GlobalAttention | false | 3,875 | [
"MIT"
] | 0 | 074c697530aaaa259a3e16467a020846b1085af1 | https://github.com/kurianbenoy/QG-Net/tree/074c697530aaaa259a3e16467a020846b1085af1 | import torch
import torch.nn as nn
import torch.cuda
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class Model(nn.Module):
"""
Luong Attention.
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\\ | | /
.....
\\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
Luong Attention (dot, general):
The full function is
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.
* dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$
* general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$
Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength} a_j h_j$$.
The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j))$$.
"""
def __init__(self, dim, coverage=False, attn_type='dot'):
super().__init__()
self.dim = dim
self.attn_type = attn_type
assert self.attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        self.sm = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def score(self, h_t, h_s):
"""
h_t (FloatTensor): batch x tgt_len x dim
h_s (FloatTensor): batch x src_len x dim
returns scores (FloatTensor): batch x tgt_len x src_len:
raw attention scores for each src index
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
# ... truncated (>4000 chars) for memory efficiency |
mnistmodel_C | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nm/cnmglpj4qvkq45h3iildar75nv7wu3lqsl6x5zchzj23qrjx4f6e.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
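# Added illustration: the kernel above fuses the conv bias-add with ReLU,
# updating the convolution output in place. Eager equivalent for an
# (N, 128, H, W) buffer and a (128,) bias:
def _reference_bias_relu(conv_out, bias):
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))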
# kernel path: runs/run_shard_7/inductor_cache/n3/cn3go4uqc7nn6zuli3ukaptfvgl5kg34wavx4oq4nfnqxrsyo4ba.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 900) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hg/chgk74xz276b4xnmdieuqi4n77fhp27aj43dy4yytv6ioqeiaylt.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_4 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
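# Added illustration: the two *_threshold_backward kernels also emit the
# boolean (activation <= 0) mask that aten.threshold_backward consumes to
# zero gradients on the backward pass. Eager equivalent:
def _reference_relu_with_mask(z):
    y = torch.relu(z)   # z = conv/linear output with bias already added
    return y, y <= 0.0  # mask saved for threshold_backward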
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (128, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (128, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 9216), (9216, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 2097152, grid=grid(2097152), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 30, 30), (57600, 900, 30, 1))
buf3 = buf2; del buf2 # reuse
buf14 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf14, 230400, grid=grid(230400), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.native_dropout]
buf4 = torch.ops.aten.native_dropout.default(reinterpret_tensor(buf3, (25, 9216), (9216, 1), 0), 0.25, True)
del buf3
buf5 = buf4[0]
buf6 = buf4[1]
del buf4
buf7 = empty_strided_cuda((25, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (9216, 128), (1, 9216), 0), out=buf7)
buf8 = buf7; del buf7 # reuse
buf13 = empty_strided_cuda((25, 128), (128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf8, primals_7, buf13, 3200, grid=grid(3200), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.relu, aten.native_dropout]
buf9 = torch.ops.aten.native_dropout.default(buf8, 0.5, True)
del buf8
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = empty_strided_cuda((25, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf12)
del primals_9
return (buf12, primals_1, primals_3, primals_4, buf1, buf5, buf6, buf10, buf11, primals_8, buf13, primals_6, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((128, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 9216), (9216, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class mnistmodel_C(nn.Module):
def __init__(self):
super(mnistmodel_C, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=128, kernel_size
=3, padding=1)
self.conv2 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=5, stride=2)
self.dense1 = nn.Linear(in_features=12 * 12 * 64, out_features=128)
self.dense2 = nn.Linear(in_features=128, out_features=10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(-1, 12 * 12 * 64)
x = F.dropout(x, 0.25)
x = F.relu(self.dense1(x))
x = F.dropout(x, 0.5)
x = self.dense2(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
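# Added note (illustrative): get_inputs() feeds 64x64 images into a model
# sized for 28x28 MNIST (12*12*64 = 9216 features), so x.view(-1, 9216)
# regroups the (4, 64, 30, 30) conv activations into 25 rows -- matching
# the (25, 9216) buffers in the compiled call() above. Quick shape check:
def _check_mnistmodel_c_shapes():
    out = mnistmodel_C()(torch.rand(4, 1, 64, 64))
    print(out.shape)  # torch.Size([25, 10])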
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 900 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (128, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 9216), (9216, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 128, 64, 64), (524288, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(2097152)](buf1, primals_2,
2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 30, 30), (57600, 900, 30, 1))
buf3 = buf2
del buf2
buf14 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(230400)](
buf3, primals_5, buf14, 230400, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_5
buf4 = torch.ops.aten.native_dropout.default(reinterpret_tensor(
buf3, (25, 9216), (9216, 1), 0), 0.25, True)
del buf3
buf5 = buf4[0]
buf6 = buf4[1]
del buf4
buf7 = empty_strided_cuda((25, 128), (128, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (9216, 128),
(1, 9216), 0), out=buf7)
buf8 = buf7
del buf7
buf13 = empty_strided_cuda((25, 128), (128, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(3200)](buf8,
primals_7, buf13, 3200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf9 = torch.ops.aten.native_dropout.default(buf8, 0.5, True)
del buf8
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = empty_strided_cuda((25, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8,
(128, 10), (1, 128), 0), alpha=1, beta=1, out=buf12)
del primals_9
return (buf12, primals_1, primals_3, primals_4, buf1, buf5, buf6, buf10,
buf11, primals_8, buf13, primals_6, buf14)
class mnistmodel_CNew(nn.Module):
def __init__(self):
super(mnistmodel_CNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=128, kernel_size
=3, padding=1)
self.conv2 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=5, stride=2)
self.dense1 = nn.Linear(in_features=12 * 12 * 64, out_features=128)
self.dense2 = nn.Linear(in_features=128, out_features=10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.dense1.weight
primals_7 = self.dense1.bias
primals_8 = self.dense2.weight
primals_9 = self.dense2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| layel2/layyer-lib | mnistmodel_C | false | 3,876 | [
"MIT"
] | 0 | db48b5c38098ee93d2d34693d98e5ef4d319d919 | https://github.com/layel2/layyer-lib/tree/db48b5c38098ee93d2d34693d98e5ef4d319d919 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=128, kernel_size
=3, padding=1)
self.conv2 = nn.Conv2d(in_channels=128, out_channels=64,
kernel_size=5, stride=2)
self.dense1 = nn.Linear(in_features=12 * 12 * 64, out_features=128)
self.dense2 = nn.Linear(in_features=128, out_features=10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(-1, 12 * 12 * 64)
x = F.dropout(x, 0.25)
x = F.relu(self.dense1(x))
x = F.dropout(x, 0.5)
x = self.dense2(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
mnistmodel_B | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ln/cln6qydwyqg4qnxsbkqnv3hx3efw2a7vyiqfft6cddbhbrgdvyyu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (36*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5q/c5qrspwskvpybawg5gxji2xsvpkstvwbi7pblvjxn3j2hjj6m6iu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
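# Added note: triton_poi_fused_0/_1 above appear to be layout prologues that
# repack the two conv weights from (O, I, kh*kw) order into (O, kh*kw, I),
# i.e. a channels-last-style reordering Inductor commonly emits ahead of
# extern convolutions; only the indexing changes, values are copied as-is.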
# kernel path: runs/run_shard_7/inductor_cache/rd/crd2lz2omjetksd56mqg7z24ugnptlivqttmc5krm2ugh2o5row4.py
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_2, %primals_3, [2, 2], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 1024], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 256
xnumel = 1024
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (1024*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + (64*x2) + (65536*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lh/clh4wf5jcaxrxls2d3u6uxqdkyap5gbjiinfzqhnswpp62hx2nrx.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 100352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
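# Sketch (assumed eager equivalent): the same bias + ReLU fusion, but in place
# (note mutated_arg_names=['in_out_ptr0']) -- the input and output layouts
# already match, so no repacking pass is needed.
def _bias_relu_inplace_sketch(conv_out, bias):
    return conv_out.add_(bias.view(1, -1, 1, 1)).relu_()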
# kernel path: runs/run_shard_7/inductor_cache/rb/crbjudhw7davtguzkduaozz3aajdwdmgps5uyhn4stbb7gucdfp5.py
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 128], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 512
xnumel = 100
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = (yindex // 128)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (128*x2) + (12800*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (100*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + (128*x2) + (12800*y1)), tmp6, xmask & ymask)
''', device_str='cuda')
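# Sketch (assumed eager equivalent): besides the activation, this kernel emits
# the `relu(x) <= 0` mask that aten.threshold_backward later uses to zero
# gradients without recomputing the forward value.
def _relu_with_mask_sketch(x):
    import torch
    y = torch.relu(x)
    return y, y <= 0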
# kernel path: runs/run_shard_7/inductor_cache/fn/cfnxv6esgaeh4dktwnbnxoyzto2s6wk7svwkiulqvv5ggiiwppbj.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_5 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
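# Sketch (assumed eager equivalent): per the graph fragment above, Inductor
# lowers dense1 to a plain mm (mm_default) and fuses the bias add, ReLU, and
# backward mask into this single pointwise pass.
def _linear_relu_sketch(x, weight, bias):
    import torch
    y = torch.relu(x @ weight.t() + bias)
    return y, y <= 0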
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (64, 1, 8, 8), (64, 64, 8, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (128, 64, 6, 6), (2304, 36, 6, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128), (128, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (10, 128), (128, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 64, 6, 6), (2304, 1, 384, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 8192, 36, grid=grid(8192, 36), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 16384, 25, grid=grid(16384, 25), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_dropout]
buf2 = torch.ops.aten.native_dropout.default(primals_1, 0.2, True)
del primals_1
buf3 = buf2[0]
del buf2
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf3, primals_2, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf6 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_3, buf6, 256, 1024, grid=grid(256, 1024), stream=stream0)
del buf5
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 128, 14, 14), (25088, 1, 1792, 128))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf8, primals_5, 100352, grid=grid(100352), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 128, 10, 10), (12800, 1, 1280, 128))
buf10 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1), torch.float32)
buf18 = empty_strided_cuda((4, 128, 10, 10), (12800, 1, 1280, 128), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_4.run(buf9, primals_7, buf10, buf18, 512, 100, grid=grid(512, 100), stream=stream0)
del primals_7
buf11 = reinterpret_tensor(buf9, (400, 128), (128, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf10, (400, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 128), (1, 128), 0), out=buf11)
buf12 = buf11; del buf11 # reuse
buf17 = empty_strided_cuda((400, 128), (128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf12, primals_9, buf17, 51200, grid=grid(51200), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.relu, aten.native_dropout]
buf13 = torch.ops.aten.native_dropout.default(buf12, 0.5, True)
del buf12
buf14 = buf13[0]
buf15 = buf13[1]
del buf13
buf16 = empty_strided_cuda((400, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf14, reinterpret_tensor(primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf16)
del primals_11
return (buf16, primals_2, buf0, buf1, buf3, buf6, buf8, reinterpret_tensor(buf10, (400, 128), (128, 1), 0), buf14, buf15, primals_10, buf17, primals_8, buf18, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 1, 8, 8), (64, 64, 8, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 64, 6, 6), (2304, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class mnistmodel_B(nn.Module):
def __init__(self):
super(mnistmodel_B, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=
8, stride=2, padding=3)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=6, stride=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=5, stride=1)
self.dense1 = nn.Linear(in_features=128, out_features=128)
self.dense2 = nn.Linear(in_features=128, out_features=10)
def forward(self, x):
x = F.dropout(x, 0.2)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 128)
x = F.relu(self.dense1(x))
x = F.dropout(x, 0.5)
x = self.dense2(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
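# Note (assumption, inferred from the shapes above): with 64x64 inputs the conv
# stack ends at (4, 128, 10, 10), so x.view(-1, 128) folds the 10x10 spatial
# grid into the batch dimension and the dense layers see 400 rows per batch of
# 4 images. A quick trace:
def _shape_trace_sketch():
    import torch
    import torch.nn.functional as F
    x = torch.zeros(4, 1, 64, 64)
    x = F.conv2d(x, torch.zeros(64, 1, 8, 8), stride=2, padding=3)  # (4, 64, 32, 32)
    x = F.conv2d(x, torch.zeros(128, 64, 6, 6), stride=2)           # (4, 128, 14, 14)
    x = F.conv2d(x, torch.zeros(128, 128, 5, 5))                    # (4, 128, 10, 10)
    return x.view(-1, 128).shape                                    # torch.Size([400, 128])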
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 1024
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 64 * x2 + 65536 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 512
xnumel = 100
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = yindex // 128
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 12800 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 100 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 128 * x2 + 12800 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (64, 1, 8, 8), (64, 64, 8, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (128, 64, 6, 6), (2304, 36, 6, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128), (128, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (10, 128), (128, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 64, 6, 6), (2304, 1, 384, 64),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(8192, 36)](primals_4, buf0, 8192, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_1[grid(16384, 25)](primals_6, buf1, 16384, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf2 = torch.ops.aten.native_dropout.default(primals_1, 0.2, True)
del primals_1
buf3 = buf2[0]
del buf2
buf5 = extern_kernels.convolution(buf3, primals_2, stride=(2, 2),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf6 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(256, 1024)](buf5,
primals_3, buf6, 256, 1024, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
del buf5
del primals_3
buf7 = extern_kernels.convolution(buf6, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 128, 14, 14), (25088, 1, 1792, 128))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_3[grid(100352)](buf8, primals_5,
100352, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf9 = extern_kernels.convolution(buf8, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 128, 10, 10), (12800, 1, 1280, 128))
buf10 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1),
torch.float32)
buf18 = empty_strided_cuda((4, 128, 10, 10), (12800, 1, 1280, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(512, 100)](
buf9, primals_7, buf10, buf18, 512, 100, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf9, (400, 128), (128, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (400, 128), (128, 1), 0
), reinterpret_tensor(primals_8, (128, 128), (1, 128), 0), out=
buf11)
buf12 = buf11
del buf11
buf17 = empty_strided_cuda((400, 128), (128, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(51200)](buf12,
primals_9, buf17, 51200, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf13 = torch.ops.aten.native_dropout.default(buf12, 0.5, True)
del buf12
buf14 = buf13[0]
buf15 = buf13[1]
del buf13
buf16 = empty_strided_cuda((400, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf14, reinterpret_tensor(
primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf16)
del primals_11
return buf16, primals_2, buf0, buf1, buf3, buf6, buf8, reinterpret_tensor(
buf10, (400, 128), (128, 1), 0
), buf14, buf15, primals_10, buf17, primals_8, buf18
class mnistmodel_BNew(nn.Module):
def __init__(self):
super(mnistmodel_BNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=
8, stride=2, padding=3)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=6, stride=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=5, stride=1)
self.dense1 = nn.Linear(in_features=128, out_features=128)
self.dense2 = nn.Linear(in_features=128, out_features=10)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.dense1.weight
primals_9 = self.dense1.bias
primals_10 = self.dense2.weight
primals_11 = self.dense2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
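# Usage sketch (assumption): the wrapper is a drop-in replacement whose forward
# routes through the compiled call(); note the (400, 10) output, one row per
# spatial location rather than per image.
def _example_usage():
    model = mnistmodel_BNew().cuda()
    x = torch.rand(4, 1, 64, 64, device='cuda')
    return model(x)  # (400, 10)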
| layel2/layyer-lib | mnistmodel_B | false | 3,877 | [
"MIT"
] | 0 | db48b5c38098ee93d2d34693d98e5ef4d319d919 | https://github.com/layel2/layyer-lib/tree/db48b5c38098ee93d2d34693d98e5ef4d319d919 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=
8, stride=2, padding=3)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=6, stride=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=128,
kernel_size=5, stride=1)
self.dense1 = nn.Linear(in_features=128, out_features=128)
self.dense2 = nn.Linear(in_features=128, out_features=10)
def forward(self, x):
x = F.dropout(x, 0.2)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 128)
x = F.relu(self.dense1(x))
x = F.dropout(x, 0.5)
x = self.dense2(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ni/cnivq3bf7dotfn57kbqzzb35lv7kje6na2klcsq34bx2f6goylw7.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 4
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
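# Sketch (assumed eager equivalent): bias add followed by LeakyReLU(0.2), also
# emitting the positive-side mask (gt in the graph above) that the backward
# pass reuses.
def _leaky_relu_with_mask_sketch(conv_out, bias):
    import torch
    y = conv_out + bias.view(1, -1, 1, 1)
    mask = y > 0
    return mask, torch.where(mask, y, 0.2 * y)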
# kernel path: runs/run_shard_7/inductor_cache/yw/cyw4n6zt3gr3twraa4ssx3cfozpri5cwx5fhvzlyii3exkr24rpy.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => add, mul_2, rsqrt, sub, var_mean
# Graph fragment:
# %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
xnumel = 32
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (r2 + (256*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 256, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 256.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + (256*x3)), tmp2, None)
tl.store(out_ptr2 + (r2 + (256*x3)), tmp27, None)
tl.store(out_ptr3 + (x3), tmp26, None)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
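# Sketch (assumed eager equivalent): F.instance_norm without affine parameters
# normalizes each (n, c) plane over its spatial positions,
# y = (x - mean) * rsqrt(var + 1e-05), using the biased (correction=0) variance
# computed by the persistent reduction above.
def _instance_norm_sketch(x, eps=1e-05):
    import torch
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    return (x - mean) * torch.rsqrt(var + eps)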
# kernel path: runs/run_shard_7/inductor_cache/ez/cezgddvh7totyxuz7u6iozt6s7dxc4paw4j3nzkp2ukqurkms3ct.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => add_1, mul_4, rsqrt_1, sub_1, var_mean_1
# Graph fragment:
# %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%view_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %getitem_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_2 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 81
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r2 = rindex
x3 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (r2 + (81*x3)), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(rmask & xmask, tmp8, 0)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp13 = tl.where(rmask & xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tl.full([XBLOCK, 1], 81, tl.int32)
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 / tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(rmask & xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = tmp7 - tmp17
tmp25 = 81.0
tmp26 = tmp23 / tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.rsqrt(tmp28)
tmp30 = tmp24 * tmp29
tl.store(in_out_ptr0 + (r2 + (81*x3)), tmp2, rmask & xmask)
tl.store(out_ptr2 + (r2 + (81*x3)), tmp30, rmask & xmask)
tl.store(out_ptr3 + (x3), tmp29, xmask)
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cm/ccmuvskurepjmymfvxboonppqu3tiwev2ejvryeb2r2p5tq5lure.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_3 => add_2, mul_6, rsqrt_2, sub_2, var_mean_2
# Graph fragment:
# %convolution_3 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%view_3, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %getitem_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_2), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_3 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_3(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 128
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 / tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = tmp7 - tmp17
tmp25 = 16.0
tmp26 = tmp23 / tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.rsqrt(tmp28)
tmp30 = tmp24 * tmp29
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.store(out_ptr2 + (r2 + (16*x3)), tmp30, xmask)
tl.store(out_ptr3 + (x3), tmp29, xmask)
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gf/cgfk4hjnbodl6dq542ea4g5jso4522kaxrpyqxr5ustrsaxckzl6.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# x_4 => convolution_4
# x_5 => sigmoid
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_5, %primals_10, %primals_11, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_sigmoid_4 = async_compile.triton('triton_poi_fused_convolution_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
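# Sketch (assumed eager equivalent): the final 1-channel conv leaves 16 values
# (4 images x a 2x2 patch grid); this kernel adds the scalar bias and squashes
# them with sigmoid in place.
def _bias_sigmoid_sketch(conv_out, bias):
    import torch
    return torch.sigmoid(conv_out + bias)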
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (8, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (16, 8, 2, 2), (32, 4, 2, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (32, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (1, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 32, 32), (4096, 1024, 32, 1))
buf1 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 16384, grid=grid(16384), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 8, 16, 16), (2048, 256, 16, 1))
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf9 = empty_strided_cuda((1, 32, 16, 16), (8192, 256, 16, 1), torch.float32)
buf8 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_1.run(buf4, primals_5, buf5, buf9, buf8, 32, 256, grid=grid(32), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 8, 16, 16), (2048, 256, 16, 1), 0), primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 9, 9), (1296, 81, 9, 1))
buf11 = buf10; del buf10 # reuse
buf12 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf16 = empty_strided_cuda((1, 64, 9, 9), (5184, 81, 9, 1), torch.float32)
buf15 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_2.run(buf11, primals_7, buf12, buf16, buf15, 64, 81, grid=grid(64), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(reinterpret_tensor(buf16, (4, 16, 9, 9), (1296, 81, 9, 1), 0), primals_8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 32, 4, 4), (512, 16, 4, 1))
buf18 = buf17; del buf17 # reuse
buf19 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf23 = empty_strided_cuda((1, 128, 4, 4), (2048, 16, 4, 1), torch.float32)
buf22 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_3.run(buf18, primals_9, buf19, buf23, buf22, 128, 16, grid=grid(128), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 32, 4, 4), (512, 16, 4, 1), 0), primals_10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 1, 2, 2), (4, 4, 2, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_4.run(buf25, primals_11, 16, grid=grid(16), stream=stream0)
del primals_11
return (buf25, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf2, buf4, reinterpret_tensor(buf8, (32, ), (1, ), 0), reinterpret_tensor(buf9, (4, 8, 16, 16), (2048, 256, 16, 1), 0), buf11, reinterpret_tensor(buf15, (64, ), (1, ), 0), reinterpret_tensor(buf16, (4, 16, 9, 9), (1296, 81, 9, 1), 0), buf18, reinterpret_tensor(buf22, (128, ), (1, ), 0), reinterpret_tensor(buf23, (4, 32, 4, 4), (512, 16, 4, 1), 0), buf25, reinterpret_tensor(buf19, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf12, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf5, (1, 32, 1, 1), (32, 1, 1, 1), 0), )
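    # Note (assumption): everything after buf25 in the tuple above is stashed
    # for autograd -- the conv weights, the leaky-ReLU mask buf1, the
    # pre-activation buffers, and the per-plane mean/rsqrt statistics
    # reinterpreted back to flat per-channel shape.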
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 8, 2, 2), (32, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 16, 2, 2), (64, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class discriminator(nn.Module):
def __init__(self):
super(discriminator, self).__init__()
self.d1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=2,
stride=2, padding=0)
self.d2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=2,
stride=2, padding=0)
self.d3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=2,
stride=2, padding=1)
self.d4 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=2,
stride=2, padding=0)
self.val = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=4,
stride=2, padding=1)
def forward(self, x):
x = F.leaky_relu(self.d1(x), 0.2)
x = F.instance_norm(F.leaky_relu(self.d2(x), 0.2))
x = F.instance_norm(F.leaky_relu(self.d3(x), 0.2))
x = F.instance_norm(F.leaky_relu(self.d4(x), 0.2))
x = self.val(x)
x = F.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
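# Shape sketch (assumption, derived from the conv hyperparameters above):
# 64 -> d1 -> 32 -> d2 -> 16 -> d3 (pad 1) -> 9 -> d4 -> 4 -> val (k4 s2 p1)
# -> 2, so the network scores a 2x2 grid of patches per image rather than a
# single scalar.
def _disc_output_shape_sketch():
    import torch
    return discriminator()(torch.rand(4, 1, 64, 64)).shape  # torch.Size([4, 1, 2, 2])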
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 256, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 256.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None)
tl.store(out_ptr2 + (r2 + 256 * x3), tmp27, None)
tl.store(out_ptr3 + x3, tmp26, None)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_2(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 64
rnumel = 81
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x3 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (r2 + 81 * x3), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp8, 0)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp13 = tl.where(rmask & xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tl.full([XBLOCK, 1], 81, tl.int32)
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 / tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(rmask & xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = tmp7 - tmp17
tmp25 = 81.0
tmp26 = tmp23 / tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.rsqrt(tmp28)
tmp30 = tmp24 * tmp29
tl.store(in_out_ptr0 + (r2 + 81 * x3), tmp2, rmask & xmask)
tl.store(out_ptr2 + (r2 + 81 * x3), tmp30, rmask & xmask)
tl.store(out_ptr3 + x3, tmp29, xmask)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_3(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 128
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tl.where(xmask, tmp8, 0)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 / tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = tmp7 - tmp17
tmp25 = 16.0
tmp26 = tmp23 / tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.rsqrt(tmp28)
tmp30 = tmp24 * tmp29
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp30, xmask)
tl.store(out_ptr3 + x3, tmp29, xmask)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
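# Final stage: add the scalar output bias and apply sigmoid in place over the
# 16 logits (the 4x1x2x2 discriminator output).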
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (8, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (16, 8, 2, 2), (32, 4, 2, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (32, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (1, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 32, 32), (4096, 1024, 32, 1))
buf1 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(16384)](buf0,
primals_2, buf1, buf2, 16384, XBLOCK=256, num_warps=4, num_stages=1
)
del buf0
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 8, 16, 16), (2048, 256, 16, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32
)
buf9 = empty_strided_cuda((1, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
buf8 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32
)
triton_per_fused__native_batch_norm_legit_convolution_1[grid(32)](buf4,
primals_5, buf5, buf9, buf8, 32, 256, num_warps=2, num_stages=1)
del primals_5
buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 8,
16, 16), (2048, 256, 16, 1), 0), primals_6, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 16, 9, 9), (1296, 81, 9, 1))
buf11 = buf10
del buf10
buf12 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf16 = empty_strided_cuda((1, 64, 9, 9), (5184, 81, 9, 1), torch.
float32)
buf15 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_convolution_2[grid(64)](buf11
, primals_7, buf12, buf16, buf15, 64, 81, XBLOCK=8, num_warps=8,
num_stages=1)
del primals_7
buf17 = extern_kernels.convolution(reinterpret_tensor(buf16, (4, 16,
9, 9), (1296, 81, 9, 1), 0), primals_8, stride=(2, 2), padding=
(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf17, (4, 32, 4, 4), (512, 16, 4, 1))
buf18 = buf17
del buf17
buf19 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf23 = empty_strided_cuda((1, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
buf22 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_convolution_3[grid(128)](
buf18, primals_9, buf19, buf23, buf22, 128, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_9
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 32,
4, 4), (512, 16, 4, 1), 0), primals_10, stride=(2, 2), padding=
(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0
), groups=1, bias=None)
assert_size_stride(buf24, (4, 1, 2, 2), (4, 4, 2, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_sigmoid_4[grid(16)](buf25, primals_11,
16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_11
return (buf25, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf2, buf4, reinterpret_tensor(buf8, (32,), (1,),
0), reinterpret_tensor(buf9, (4, 8, 16, 16), (2048, 256, 16, 1), 0),
buf11, reinterpret_tensor(buf15, (64,), (1,), 0),
reinterpret_tensor(buf16, (4, 16, 9, 9), (1296, 81, 9, 1), 0),
buf18, reinterpret_tensor(buf22, (128,), (1,), 0),
reinterpret_tensor(buf23, (4, 32, 4, 4), (512, 16, 4, 1), 0), buf25,
reinterpret_tensor(buf19, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf12, (1, 64, 1, 1), (64, 1, 1, 1), 0),
reinterpret_tensor(buf5, (1, 32, 1, 1), (32, 1, 1, 1), 0))
class discriminatorNew(nn.Module):
def __init__(self):
super(discriminatorNew, self).__init__()
self.d1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=2,
stride=2, padding=0)
self.d2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=2,
stride=2, padding=0)
self.d3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=2,
stride=2, padding=1)
self.d4 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=2,
stride=2, padding=0)
self.val = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=4,
stride=2, padding=1)
def forward(self, input_0):
primals_1 = self.d1.weight
primals_2 = self.d1.bias
primals_4 = self.d2.weight
primals_5 = self.d2.bias
primals_6 = self.d3.weight
primals_7 = self.d3.bias
primals_8 = self.d4.weight
primals_9 = self.d4.bias
primals_10 = self.val.weight
primals_11 = self.val.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
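# Usage sketch (shapes follow the asserts in call() above; illustrative only):
#     model = discriminatorNew().cuda()
#     prob = model(torch.rand(4, 1, 64, 64, device='cuda'))  # -> (4, 1, 2, 2)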
| layel2/layyer-lib | discriminator | false | 3,878 | ["MIT"] | 0 | db48b5c38098ee93d2d34693d98e5ef4d319d919 | https://github.com/layel2/layyer-lib/tree/db48b5c38098ee93d2d34693d98e5ef4d319d919 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.d1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=2,
stride=2, padding=0)
self.d2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=2,
stride=2, padding=0)
self.d3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=2,
stride=2, padding=1)
self.d4 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=2,
stride=2, padding=0)
self.val = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=4,
stride=2, padding=1)
def forward(self, x):
x = F.leaky_relu(self.d1(x), 0.2)
x = F.instance_norm(F.leaky_relu(self.d2(x), 0.2))
x = F.instance_norm(F.leaky_relu(self.d3(x), 0.2))
x = F.instance_norm(F.leaky_relu(self.d4(x), 0.2))
x = self.val(x)
x = F.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
PartialConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/p3/cp3qleddjiuuytozrtebx5pzf2ycpwtw4mkq2jsx7qqswymv2bm6.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
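# triton_poi_fused_mul_0 is simply the elementwise `input * mask` product that
# feeds the masked convolution below.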
# kernel path: runs/run_shard_7/inductor_cache/mv/cmvn76bzl7ybw2kvfc2lhvwgawc45yfp7nogxqi7x5o2n5qsklcs.py
# Topologically Sorted Source Nodes: [output, no_update_holes, mask_sum, sub, truediv, output_pre, output_1, new_mask, new_mask_1], Original ATen: [aten.convolution, aten.eq, aten.masked_fill, aten.sub, aten.div, aten.add, aten.ones_like]
# Source node to ATen node mapping:
# mask_sum => full_default, where
# new_mask => full_default_2
# new_mask_1 => where_2
# no_update_holes => eq
# output => convolution
# output_1 => full_default_1, where_1
# output_pre => add
# sub => sub
# truediv => div
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%convolution_1, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %convolution_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %expand), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %where), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %expand), kwargs = {})
# %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %add), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 1, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %full_default_2), kwargs = {})
triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1 = async_compile.triton('triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp5 - tmp4
tmp7 = 1.0
tmp8 = tl.where(tmp2, tmp7, tmp0)
tmp9 = tmp6 / tmp8
tmp10 = tmp9 + tmp4
tmp11 = tl.where(tmp2, tmp1, tmp10)
tmp12 = tl.where(tmp2, tmp1, tmp7)
tl.store(in_out_ptr0 + (x2), tmp11, xmask)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [output_mask], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
del primals_2
del primals_5
buf3 = buf1; del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output, no_update_holes, mask_sum, sub, truediv, output_pre, output_1, new_mask, new_mask_1], Original ATen: [aten.convolution, aten.eq, aten.masked_fill, aten.sub, aten.div, aten.add, aten.ones_like]
triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1.run(buf3, buf2, primals_4, buf4, 16, grid=grid(16), stream=stream0)
del primals_4
return (buf3, buf4, primals_3, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, 'Unsupported initialization: {}'.format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class PartialConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, False)
self.input_conv.apply(weights_init('kaiming'))
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, input, mask):
output = self.input_conv(input * mask)
if self.input_conv.bias is not None:
output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(
output)
else:
output_bias = torch.zeros_like(output)
with torch.no_grad():
output_mask = self.mask_conv(mask)
no_update_holes = output_mask == 0
mask_sum = output_mask.masked_fill_(no_update_holes, 1.0)
output_pre = (output - output_bias) / mask_sum + output_bias
output = output_pre.masked_fill_(no_update_holes, 0.0)
new_mask = torch.ones_like(output)
new_mask = new_mask.masked_fill_(no_update_holes, 0.0)
return output, new_mask
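# Usage sketch (matching get_inputs below; illustrative only). With
# kernel_size=4, stride=1 and no padding, a 4x4 input collapses to a single
# spatial position:
#     pconv = PartialConv(4, 4, 4)
#     out, new_mask = pconv(torch.rand(4, 4, 4, 4), torch.ones(4, 4, 4, 4))
#     out.shape, new_mask.shape  # both torch.Size([4, 4, 1, 1])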
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1(
in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp5 - tmp4
tmp7 = 1.0
tmp8 = tl.where(tmp2, tmp7, tmp0)
tmp9 = tmp6 / tmp8
tmp10 = tmp9 + tmp4
tmp11 = tl.where(tmp2, tmp1, tmp10)
tmp12 = tl.where(tmp2, tmp1, tmp7)
tl.store(in_out_ptr0 + x2, tmp11, xmask)
tl.store(out_ptr0 + x2, tmp12, xmask)
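# The fused kernel above reproduces the eager renormalization step: tmp2 flags
# "holes" where the mask convolution summed to zero; elsewhere it computes
# ((conv_out + bias) - bias) / mask_sum + bias, then zeroes the holes and emits
# the matching 0/1 new_mask. The add-then-subtract of the bias mirrors the
# (output - output_bias) / mask_sum + output_bias expression in the eager
# PartialConv.forward.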
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = extern_kernels.convolution(primals_2, primals_5, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
del primals_2
del primals_5
buf3 = buf1
del buf1
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_add_convolution_div_eq_masked_fill_ones_like_sub_1[
grid(16)](buf3, buf2, primals_4, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
return buf3, buf4, primals_3, buf0, buf2
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, 'Unsupported initialization: {}'.format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class PartialConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, False)
self.input_conv.apply(weights_init('kaiming'))
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, input_0, input_1):
primals_1 = self.input_conv.weight
primals_4 = self.input_conv.bias
primals_2 = self.mask_conv.weight
primals_3 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| labcontext/image-inpainting-oldpaper | PartialConv | false | 3,879 | ["Apache-2.0"] | 0 | da4683a2c58d662e443ea24ab93fd9d8fcb96bda | https://github.com/labcontext/image-inpainting-oldpaper/tree/da4683a2c58d662e443ea24ab93fd9d8fcb96bda | import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
if init_type == 'gaussian':
nn.init.normal_(m.weight, 0.0, 0.02)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
elif init_type == 'default':
pass
else:
assert 0, 'Unsupported initialization: {}'.format(init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, False)
self.input_conv.apply(weights_init('kaiming'))
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, input, mask):
output = self.input_conv(input * mask)
if self.input_conv.bias is not None:
output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(
output)
else:
output_bias = torch.zeros_like(output)
with torch.no_grad():
output_mask = self.mask_conv(mask)
no_update_holes = output_mask == 0
mask_sum = output_mask.masked_fill_(no_update_holes, 1.0)
output_pre = (output - output_bias) / mask_sum + output_bias
output = output_pre.masked_fill_(no_update_holes, 0.0)
new_mask = torch.ones_like(output)
new_mask = new_mask.masked_fill_(no_update_holes, 0.0)
return output, new_mask
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
mnistmodel_A | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/n3/cn3go4uqc7nn6zuli3ukaptfvgl5kg34wavx4oq4nfnqxrsyo4ba.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 900) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hg/chgk74xz276b4xnmdieuqi4n77fhp27aj43dy4yytv6ioqeiaylt.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_4 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 9216), (9216, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 30, 30), (57600, 900, 30, 1))
buf3 = buf2; del buf2 # reuse
buf14 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf14, 230400, grid=grid(230400), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_1, x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.native_dropout]
buf4 = torch.ops.aten.native_dropout.default(buf3, 0.25, True)
del buf3
buf5 = buf4[0]
buf6 = buf4[1]
del buf4
buf7 = empty_strided_cuda((25, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (25, 9216), (9216, 1), 0), reinterpret_tensor(primals_6, (9216, 128), (1, 9216), 0), out=buf7)
buf8 = buf7; del buf7 # reuse
buf13 = empty_strided_cuda((25, 128), (128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf8, primals_7, buf13, 3200, grid=grid(3200), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.relu, aten.native_dropout]
buf9 = torch.ops.aten.native_dropout.default(buf8, 0.5, True)
del buf8
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = empty_strided_cuda((25, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf12)
del primals_9
return (buf12, primals_1, primals_3, primals_4, buf1, buf6, reinterpret_tensor(buf5, (25, 9216), (9216, 1), 0), buf10, buf11, primals_8, buf13, primals_6, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 9216), (9216, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class mnistmodel_A(nn.Module):
def __init__(self):
super(mnistmodel_A, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=
5, stride=1, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=5, stride=2)
self.dense1 = nn.Linear(in_features=64 * 12 * 12, out_features=128)
self.dense2 = nn.Linear(in_features=128, out_features=10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.dropout(x, 0.25)
x = x.view(-1, 64 * 12 * 12)
x = F.relu(self.dense1(x))
x = F.dropout(x, 0.5)
x = self.dense2(x)
return x
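# Note: with 64x64 inputs conv2 produces (N, 64, 30, 30), so
# x.view(-1, 64 * 12 * 12) regroups 4 * 57600 = 230400 elements into 25 rows
# of 9216 -- which is why the compiled graph above carries a batch dimension
# of 25 (see the (25, 9216) reinterpret) rather than 4.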
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 900 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
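# The `*_threshold_backward` kernels above fuse the forward ReLU with writing
# the `relu(x) <= 0` mask (out_ptr0) that autograd later uses to zero
# gradients in the backward pass.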
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 9216), (9216, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 30, 30), (57600, 900, 30, 1))
buf3 = buf2
del buf2
buf14 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(230400)](
buf3, primals_5, buf14, 230400, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_5
buf4 = torch.ops.aten.native_dropout.default(buf3, 0.25, True)
del buf3
buf5 = buf4[0]
buf6 = buf4[1]
del buf4
buf7 = empty_strided_cuda((25, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (25, 9216), (9216, 1), 0
), reinterpret_tensor(primals_6, (9216, 128), (1, 9216), 0),
out=buf7)
buf8 = buf7
del buf7
buf13 = empty_strided_cuda((25, 128), (128, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(3200)](buf8,
primals_7, buf13, 3200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf9 = torch.ops.aten.native_dropout.default(buf8, 0.5, True)
del buf8
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = empty_strided_cuda((25, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8,
(128, 10), (1, 128), 0), alpha=1, beta=1, out=buf12)
del primals_9
return (buf12, primals_1, primals_3, primals_4, buf1, buf6,
reinterpret_tensor(buf5, (25, 9216), (9216, 1), 0), buf10, buf11,
primals_8, buf13, primals_6, buf14)
class mnistmodel_ANew(nn.Module):
def __init__(self):
super(mnistmodel_ANew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=
5, stride=1, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=5, stride=2)
self.dense1 = nn.Linear(in_features=64 * 12 * 12, out_features=128)
self.dense2 = nn.Linear(in_features=128, out_features=10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.dense1.weight
primals_7 = self.dense1.bias
primals_8 = self.dense2.weight
primals_9 = self.dense2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
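# Note: because of the view(-1, 9216) discussed earlier, a (4, 1, 64, 64)
# input yields a (25, 10) output from this module, matching buf12 in call().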
| layel2/layyer-lib | mnistmodel_A | false | 3,880 | ["MIT"] | 0 | db48b5c38098ee93d2d34693d98e5ef4d319d919 | https://github.com/layel2/layyer-lib/tree/db48b5c38098ee93d2d34693d98e5ef4d319d919 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=
5, stride=1, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size
=5, stride=2)
self.dense1 = nn.Linear(in_features=64 * 12 * 12, out_features=128)
self.dense2 = nn.Linear(in_features=128, out_features=10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.dropout(x, 0.25)
x = x.view(-1, 64 * 12 * 12)
x = F.relu(self.dense1(x))
x = F.dropout(x, 0.5)
x = self.dense2(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
discriminator2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4d/c4dgtligqaeimc4xcqvmprv4wzcppabu4nj2q5qfla6csif33edb.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dn/cdnf4lf4p3gpdg34piupn3w4vmbbkdetujzjvvrge2fltkwtspni.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
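# --- Hedged reference (exposition only; `_maxpool_ref` is an added name): the
# kernel above is a 2x2, stride-2 max pool that emits the max plus an int8
# code in {0, 1, 2, 3} (row-major window position) for the argmax. The eager
# call below pools identically, though F.max_pool2d's indices are flat input
# offsets rather than 0..3 codes.
_maxpool_ref = lambda x: torch.nn.functional.max_pool2d(x, 2, stride=2, return_indices=True)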
# kernel path: runs/run_shard_7/inductor_cache/4i/c4i5vaet34t34fxl3sxd6cmi5pcdyymvhpt7xvlymt65xk7wsq76.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_2 => add, mul_2, rsqrt, sub, var_mean
# Graph fragment:
# %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_2 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[32, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
xnumel = 32
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (r2 + (1024*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 1024, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 1024.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + (1024*x3)), tmp2, None)
tl.store(out_ptr2 + (r2 + (1024*x3)), tmp27, None)
tl.store(out_ptr3 + (x3), tmp26, None)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
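# --- Hedged reference (exposition only; `_instnorm_ref` is an added name):
# this persistent reduction fuses bias add, LeakyReLU(0.2), and instance norm.
# Per (n, c) plane it reduces the mean and biased variance over the 1024
# spatial positions and rescales by rsqrt(var + 1e-05), matching
# F.instance_norm in the eager source. Applied to y = leaky_relu(conv + bias):
_instnorm_ref = lambda y: (y - y.mean((2, 3), keepdim=True)) * torch.rsqrt(y.var((2, 3), unbiased=False, keepdim=True) + 1e-05)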
# kernel path: runs/run_shard_7/inductor_cache/ug/cug7jf5vyab745xxewtsuswvbenvbnnefjmrlskwtfa5kz4uwn57.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pk/cpk2jnafx6iiyau5rmzwjjyxlxlxrrhmgkanol3nvkwroh4cww5b.py
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_4 => add_1, mul_4, rsqrt_1, sub_1, var_mean_1
# Graph fragment:
# %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %getitem_7), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_4(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
xnumel = 64
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (r2 + (256*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 256, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 256.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + (256*x3)), tmp2, None)
tl.store(out_ptr2 + (r2 + (256*x3)), tmp27, None)
tl.store(out_ptr3 + (x3), tmp26, None)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ec/cecfzmb7lcmubnp7axcg34e5wnqbt6562ckr2vzbwja7yvxfo3xj.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_8, getitem_9
# Graph fragment:
# %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/yo/cyollwgo2o4o7lljknisgh53aczcft4nflalev37dbcoaxwsjh55.py
# Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_6 => add_2, mul_6, rsqrt_2, sub_2, var_mean_2
# Graph fragment:
# %convolution_3 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_10, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %getitem_11), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_2), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_6 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_6(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 128
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (r2 + (64*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 / tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = tmp7 - tmp17
tmp25 = 64.0
tmp26 = tmp23 / tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.rsqrt(tmp28)
tmp30 = tmp24 * tmp29
tl.store(in_out_ptr0 + (r2 + (64*x3)), tmp2, xmask)
tl.store(out_ptr2 + (r2 + (64*x3)), tmp30, xmask)
tl.store(out_ptr3 + (x3), tmp29, xmask)
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/g6/cg67zbcxllhre6kailuhhhulp5ncvcka2n7exarkzuiemjgijwsh.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_7 => getitem_12, getitem_13
# Graph fragment:
# %getitem_12 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_13 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fv/cfvgalpzib5gobitus7buomb6dy26xvzqh3kwq7nqqi2c45xsdyi.py
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# x_8 => convolution_4
# x_9 => sigmoid
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_12, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_sigmoid_8 = async_compile.triton('triton_poi_fused_convolution_sigmoid_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
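# --- Hedged reference (exposition only; `_sigmoid_head_ref` is an added
# name): the head kernel adds the scalar bias of the 1x1 `val` conv and
# applies a sigmoid in place over the 64 output elements.
_sigmoid_head_ref = lambda y, b: torch.sigmoid(y + b)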
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (1, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 65536, grid=grid(65536), stream=stream0)
del buf0
del primals_2
buf3 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf2, buf3, buf4, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 8, 32, 32), (8192, 1024, 32, 1))
buf6 = buf5; del buf5 # reuse
buf7 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf11 = empty_strided_cuda((1, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
buf10 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_2.run(buf6, primals_5, buf7, buf11, buf10, 32, 1024, grid=grid(32), stream=stream0)
del primals_5
buf12 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32)
buf13 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf11, buf12, buf13, 8192, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf12, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 16, 16, 16), (4096, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
buf16 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf20 = empty_strided_cuda((1, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
buf19 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_4.run(buf15, primals_7, buf16, buf20, buf19, 64, 256, grid=grid(64), stream=stream0)
del primals_7
buf21 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32)
buf22 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf20, buf21, buf22, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf21, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 32, 8, 8), (2048, 64, 8, 1))
buf24 = buf23; del buf23 # reuse
buf25 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf29 = empty_strided_cuda((1, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
buf28 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_6.run(buf24, primals_9, buf25, buf29, buf28, 128, 64, grid=grid(128), stream=stream0)
del primals_9
buf30 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32)
buf31 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf29, buf30, buf31, 2048, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf30, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1, 4, 4), (16, 16, 4, 1))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_8.run(buf33, primals_11, 64, grid=grid(64), stream=stream0)
del primals_11
return (buf33, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf2, buf3, buf4, buf6, reinterpret_tensor(buf10, (32, ), (1, ), 0), reinterpret_tensor(buf11, (4, 8, 32, 32), (8192, 1024, 32, 1), 0), buf12, buf13, buf15, reinterpret_tensor(buf19, (64, ), (1, ), 0), reinterpret_tensor(buf20, (4, 16, 16, 16), (4096, 256, 16, 1), 0), buf21, buf22, buf24, reinterpret_tensor(buf28, (128, ), (1, ), 0), reinterpret_tensor(buf29, (4, 32, 8, 8), (2048, 64, 8, 1), 0), buf30, buf31, buf33, reinterpret_tensor(buf25, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf16, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf7, (1, 32, 1, 1), (32, 1, 1, 1), 0), )
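    # Hedged note (exposition only): besides the sigmoid scores (buf33), call()
    # returns the weights, intermediate activations, pool indices, and the saved
    # mean/rsqrt buffers; as we read it, these are kept so the autograd backward
    # of the fused graph can reuse them instead of recomputing.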
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class discriminator2(nn.Module):
def __init__(self):
super().__init__()
self.d1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3,
stride=1, padding=1)
self.d2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3,
stride=1, padding=1)
self.d3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3,
stride=1, padding=1)
self.d4 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3,
stride=1, padding=1)
self.val = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1,
stride=1, padding=0)
self.maxpool = nn.MaxPool2d(2)
def forward(self, x):
x = F.leaky_relu(self.d1(x), 0.2)
x = self.maxpool(x)
x = F.instance_norm(F.leaky_relu(self.d2(x), 0.2))
x = self.maxpool(x)
x = F.instance_norm(F.leaky_relu(self.d3(x), 0.2))
x = self.maxpool(x)
x = F.instance_norm(F.leaky_relu(self.d4(x), 0.2))
x = self.maxpool(x)
x = self.val(x)
x = F.sigmoid(x)
return x
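# Hedged shape trace (added note, not in the original source): every 3x3 conv
# above preserves H and W, and every MaxPool2d(2) halves them, so an input of
# shape (4, 1, 64, 64) shrinks 64 -> 32 -> 16 -> 8 -> 4 spatially and the
# sigmoid head emits per-patch scores:
# >>> discriminator2()(torch.rand(4, 1, 64, 64)).shape
# torch.Size([4, 1, 4, 4])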
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_2(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 1024, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 1024.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None)
tl.store(out_ptr2 + (r2 + 1024 * x3), tmp27, None)
tl.store(out_ptr3 + x3, tmp26, None)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_4(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 256, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 256.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None)
tl.store(out_ptr2 + (r2 + 256 * x3), tmp27, None)
tl.store(out_ptr3 + x3, tmp26, None)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_6(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 128
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (r2 + 64 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tl.where(xmask, tmp8, 0)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp14 / tmp16
tmp18 = tmp8 - tmp17
tmp19 = tmp18 * tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.where(xmask, tmp20, 0)
tmp23 = tl.sum(tmp22, 1)[:, None]
tmp24 = tmp7 - tmp17
tmp25 = 64.0
tmp26 = tmp23 / tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.rsqrt(tmp28)
tmp30 = tmp24 * tmp29
tl.store(in_out_ptr0 + (r2 + 64 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 64 * x3), tmp30, xmask)
tl.store(out_ptr3 + x3, tmp29, xmask)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_sigmoid_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (1, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(65536)](buf0,
primals_2, buf1, buf2, 65536, XBLOCK=512, num_warps=4, num_stages=1
)
del buf0
del primals_2
buf3 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.float32)
buf4 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(16384)](buf2, buf3,
buf4, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 8, 32, 32), (8192, 1024, 32, 1))
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32
)
buf11 = empty_strided_cuda((1, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
buf10 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
triton_per_fused__native_batch_norm_legit_convolution_2[grid(32)](buf6,
primals_5, buf7, buf11, buf10, 32, 1024, num_warps=8, num_stages=1)
del primals_5
buf12 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1),
torch.float32)
buf13 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(8192)](buf11, buf12,
buf13, 8192, XBLOCK=256, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf12, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 16, 16, 16), (4096, 256, 16, 1))
buf15 = buf14
del buf14
buf16 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf20 = empty_strided_cuda((1, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
buf19 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_convolution_4[grid(64)](buf15
, primals_7, buf16, buf20, buf19, 64, 256, num_warps=2,
num_stages=1)
del primals_7
buf21 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.
float32)
buf22 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(4096)](buf20, buf21,
buf22, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf23 = extern_kernels.convolution(buf21, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 32, 8, 8), (2048, 64, 8, 1))
buf24 = buf23
del buf23
buf25 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf29 = empty_strided_cuda((1, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
buf28 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_convolution_6[grid(128)](
buf24, primals_9, buf25, buf29, buf28, 128, 64, XBLOCK=8,
num_warps=4, num_stages=1)
del primals_9
buf30 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.
float32)
buf31 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(2048)](buf29, buf30,
buf31, 2048, XBLOCK=256, num_warps=4, num_stages=1)
buf32 = extern_kernels.convolution(buf30, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1, 4, 4), (16, 16, 4, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_sigmoid_8[grid(64)](buf33, primals_11,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
return (buf33, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf2, buf3, buf4, buf6, reinterpret_tensor(buf10,
(32,), (1,), 0), reinterpret_tensor(buf11, (4, 8, 32, 32), (8192,
1024, 32, 1), 0), buf12, buf13, buf15, reinterpret_tensor(buf19, (
64,), (1,), 0), reinterpret_tensor(buf20, (4, 16, 16, 16), (4096,
256, 16, 1), 0), buf21, buf22, buf24, reinterpret_tensor(buf28, (
128,), (1,), 0), reinterpret_tensor(buf29, (4, 32, 8, 8), (2048, 64,
8, 1), 0), buf30, buf31, buf33, reinterpret_tensor(buf25, (1, 128,
1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf16, (1, 64, 1, 1),
(64, 1, 1, 1), 0), reinterpret_tensor(buf7, (1, 32, 1, 1), (32, 1,
1, 1), 0))
class discriminator2New(nn.Module):
def __init__(self):
super().__init__()
self.d1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3,
stride=1, padding=1)
self.d2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3,
stride=1, padding=1)
self.d3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3,
stride=1, padding=1)
self.d4 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3,
stride=1, padding=1)
self.val = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1,
stride=1, padding=0)
self.maxpool = nn.MaxPool2d(2)
def forward(self, input_0):
primals_1 = self.d1.weight
primals_2 = self.d1.bias
primals_4 = self.d2.weight
primals_5 = self.d2.bias
primals_6 = self.d3.weight
primals_7 = self.d3.bias
primals_8 = self.d4.weight
primals_9 = self.d4.bias
primals_10 = self.val.weight
primals_11 = self.val.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
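# Hedged usage sketch (exposition only): the compiled wrapper runs the fused
# Triton kernels, so the module and its input must both live on CUDA and the
# input must match the (4, 1, 64, 64) float32 shape asserted in call():
# >>> d = discriminator2New().cuda()
# >>> d(torch.rand(4, 1, 64, 64, device='cuda')).shape
# torch.Size([4, 1, 4, 4])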
 | layel2/layyer-lib | discriminator2 | false | 3881 | ["MIT"] | 0 | db48b5c38098ee93d2d34693d98e5ef4d319d919 | https://github.com/layel2/layyer-lib/tree/db48b5c38098ee93d2d34693d98e5ef4d319d919 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.d1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3,
stride=1, padding=1)
self.d2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3,
stride=1, padding=1)
self.d3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3,
stride=1, padding=1)
self.d4 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3,
stride=1, padding=1)
self.val = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1,
stride=1, padding=0)
self.maxpool = nn.MaxPool2d(2)
def forward(self, x):
x = F.leaky_relu(self.d1(x), 0.2)
x = self.maxpool(x)
x = F.instance_norm(F.leaky_relu(self.d2(x), 0.2))
x = self.maxpool(x)
x = F.instance_norm(F.leaky_relu(self.d3(x), 0.2))
x = self.maxpool(x)
x = F.instance_norm(F.leaky_relu(self.d4(x), 0.2))
x = self.maxpool(x)
x = self.val(x)
x = F.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
nnNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/hb/chbhiaabilrxfybu2ffrrnkpdqbtolbw723vatlywaxvb4btkitd.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
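# --- Hedged reference (exposition only; `_l2_normalize_ref` is an added
# name): the kernel above implements F.normalize over the last (size-4) axis:
# every element is divided by max(||row||_2, 1e-12), the same clamp computed
# in tmp13/tmp14 above.
_l2_normalize_ref = lambda x: x / x.norm(dim=-1, keepdim=True).clamp_min(1e-12)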
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class nnNorm(nn.Module):
def __init__(self, dim=-1):
super().__init__()
self.dim = dim
def forward(self, x):
return F.normalize(x, dim=self.dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class nnNormNew(nn.Module):
def __init__(self, dim=-1):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
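# Hedged usage sketch (assumes a CUDA device is available): the compiled
# wrapper should agree with the eager nnNorm module up to float tolerance.
def _check_nnnorm_equivalence():
    import torch
    import torch.nn.functional as F
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = nnNormNew()(x)
    return torch.allclose(y, F.normalize(x, dim=-1), atol=1e-6)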
| learning-group-structure/paper | nnNorm | false | 3,882 | [
"MIT"
] | 0 | 96abf7e25cb7e95f45d6eb025257c0ba9e22fc55 | https://github.com/learning-group-structure/paper/tree/96abf7e25cb7e95f45d6eb025257c0ba9e22fc55 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, dim=-1):
super().__init__()
self.dim = dim
def forward(self, x):
return F.normalize(x, dim=self.dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
HLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/mr/cmr3tv5ws2snvq6sc6sxr656wx47kzohgg6mk4czehbbxdzlihyt.py
# Topologically Sorted Source Nodes: [softmax, log_softmax], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax_1, sub_1
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {})
triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
tl.store(out_ptr1 + (x3), tmp8, xmask)
''', device_str='cuda')
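# The kernel subtracts the per-channel (dim=1) max before exponentiating,
# the standard numerically stable softmax / log-softmax trick. Hedged eager
# sketch of the same first stage (hypothetical helper):
def _stable_softmax_stage1(x):
    shifted = x - x.amax(dim=1, keepdim=True)
    return shifted.exp(), shifted  # exp feeds softmax, shifted feeds log_softmax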
# kernel path: runs/run_shard_7/inductor_cache/nk/cnk3zca2cotrn52zquetkd3446ncgzdqpvoo7a7vey3gzfcyn5ha.py
# Topologically Sorted Source Nodes: [softmax, log_softmax, b], Original ATen: [aten._softmax, aten._log_softmax, aten.mul]
# Source node to ATen node mapping:
# b => mul
# log_softmax => exp_1, log, sub_2, sum_2
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_2), kwargs = {})
triton_poi_fused__log_softmax__softmax_mul_1 = async_compile.triton('triton_poi_fused__log_softmax__softmax_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x3), xmask)
tmp10 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tl.store(out_ptr0 + (x3), tmp23, xmask)
''', device_str='cuda')
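# Stage two divides the exponentials by their dim=1 sum (softmax) and
# subtracts the log-sum-exp from the shifted logits (log_softmax), then
# forms the elementwise product p * log(p). Hedged eager sketch:
def _p_log_p(exp_shifted, shifted):
    s = exp_shifted.sum(dim=1, keepdim=True)
    return (exp_shifted / s) * (shifted - s.log())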
# kernel path: runs/run_shard_7/inductor_cache/mp/cmpeytmhcba5c4ipd7umknv7ucx2ya645em2qrjxfafjjutkbyu6.py
# Topologically Sorted Source Nodes: [ne, mask], Original ATen: [aten.ne, aten._to_copy]
# Source node to ATen node mapping:
# mask => convert_element_type
# ne => ne
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, -1), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.float32), kwargs = {})
triton_poi_fused__to_copy_ne_2 = async_compile.triton('triton_poi_fused__to_copy_ne_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_ne_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_ne_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -1.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
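# Builds the float mask (labels != ignore_index) used to gate the entropy
# term, matching `(labels != self.ignore_index).float()` in the eager
# module. Hedged one-line sketch:
def _ignore_mask(labels, ignore_index=-1.0):
    return (labels != ignore_index).float()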
# kernel path: runs/run_shard_7/inductor_cache/c4/cc4i4ehzppkefrzntmfvwa54v55o6fsxcfgd2ybxiqpc5tki2ssl.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16) % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
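# Despite its name, this "clone" kernel performs the dim=1 reduction
# b.sum(dim=1), written out contiguously (and tiled for the broadcasted
# batched matmul that follows). Hedged sketch:
def _sum_dim1(b):
    return b.sum(dim=1).contiguous()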
# kernel path: runs/run_shard_7/inductor_cache/de/cdef5r4esgqj7h3qdiclpch3xr2uox6w4tmxuflot4vwdr4ea7xl.py
# Topologically Sorted Source Nodes: [b_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# b_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, -1.0), kwargs = {})
triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
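# Final in-place negation: b = -1.0 * matmul(mask, b.sum(dim=1)), turning
# the accumulated p * log(p) terms into a (masked) entropy.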
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, log_softmax], Original ATen: [aten._softmax, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0.run(arg1_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, log_softmax, b], Original ATen: [aten._softmax, aten._log_softmax, aten.mul]
triton_poi_fused__log_softmax__softmax_mul_1.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [ne, mask], Original ATen: [aten.ne, aten._to_copy]
triton_poi_fused__to_copy_ne_2.run(arg0_1, buf3, 256, grid=grid(256), stream=stream0)
del arg0_1
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf2, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
del buf3
del buf4
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [b_1], Original ATen: [aten.mul]
triton_poi_fused_mul_4.run(buf6, 256, grid=grid(256), stream=stream0)
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class HLoss(nn.Module):
"""
Entropy loss used for entropy maximization.
"""
def __init__(self, ignore_index=-1):
super(HLoss, self).__init__()
self.ignore_index = ignore_index
def forward(self, x, labels):
mask = (labels != self.ignore_index).float()
b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
b = -1.0 * torch.matmul(mask, b.sum(dim=1))
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x3, xmask)
tmp10 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tl.store(out_ptr0 + x3, tmp23, xmask)
@triton.jit
def triton_poi_fused__to_copy_ne_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -1.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg1_1, buf0,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_1[grid(256)](buf0, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__to_copy_ne_2[grid(256)](arg0_1, buf3, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
buf4 = buf0
del buf0
triton_poi_fused_clone_3[grid(256)](buf2, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
del buf3
del buf4
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_mul_4[grid(256)](buf6, 256, XBLOCK=256, num_warps=
4, num_stages=1)
return buf6,
class HLossNew(nn.Module):
"""
Entropy loss used for entropy maximization.
"""
def __init__(self, ignore_index=-1):
super(HLossNew, self).__init__()
self.ignore_index = ignore_index
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
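# Hedged usage sketch (assumes CUDA). Note that, per the kernels above,
# arg0_1 feeds the ne/mask path and arg1_1 feeds the softmax path, so
# input_0 plays the role of `labels` and input_1 the role of `x` from the
# eager HLoss:
def _check_hloss_equivalence():
    import torch
    import torch.nn.functional as F
    x = torch.rand(4, 4, 4, 4, device='cuda')
    labels = torch.rand(4, 4, 4, 4, device='cuda')
    mask = (labels != -1).float()
    b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
    eager = -1.0 * torch.matmul(mask, b.sum(dim=1))
    return torch.allclose(HLossNew()(labels, x), eager, atol=1e-5)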
| lemon234071/oc_parlai | HLoss | false | 3,883 | [
"MIT"
] | 0 | 33a0e57c48e58903cb1666e367a7bb9ef012de0c | https://github.com/lemon234071/oc_parlai/tree/33a0e57c48e58903cb1666e367a7bb9ef012de0c | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Entropy loss used for entropy maximization.
"""
def __init__(self, ignore_index=-1):
super().__init__()
self.ignore_index = ignore_index
def forward(self, x, labels):
mask = (labels != self.ignore_index).float()
b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
b = -1.0 * torch.matmul(mask, b.sum(dim=1))
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
gen_ab_cf | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fj/cfjakbqxo5iyiprnyhk5pnlykjlvzwlxk7jzutbywsk77hzarjqp.py
# Topologically Sorted Source Nodes: [conv2d, d1, u3], Original ATen: [aten.convolution, aten.leaky_relu, aten.cat]
# Source node to ATen node mapping:
# conv2d => convolution
# d1 => gt, mul, where
# u3 => cat_2
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%view_9, %where], 1), kwargs = {})
triton_poi_fused_cat_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_cat_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x1 = (xindex // 4096) % 8
x2 = (xindex // 32768)
x3 = xindex % 32768
tmp0 = tl.load(in_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x4), tmp4, None)
tl.store(out_ptr1 + (x4), tmp7, None)
tl.store(out_ptr2 + (x3 + (65536*x2)), tmp7, None)
''', device_str='cuda')
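# The fusion adds the conv bias, applies LeakyReLU with negative slope 0.2,
# and writes the activation twice: once as the encoder feature map and once
# into its slot of a later channel concatenation (decoder skip connection).
# Hedged sketch of the activation itself:
def _leaky_relu_0p2(x):
    import torch
    return torch.where(x > 0, x, 0.2 * x)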
# kernel path: runs/run_shard_7/inductor_cache/5z/c5z4ugnz2tz6etzq5gfjssvgajfzqbzizudkvgjbmhjkyjb3kwgl.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
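# 2x2 / stride-2 max pool that also records the within-window argmax as a
# 0..3 offset (the "low memory" indices form). Roughly the eager:
def _maxpool2x2(x):
    import torch.nn.functional as F
    # note: eager return_indices encodes flat input offsets, not 0..3 offsets
    return F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)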
# kernel path: runs/run_shard_7/inductor_cache/tv/ctvnu6bpqecufnttlfwmj2dn3kxu3ns6ifohf2tmqdwgrd7xlf5l.py
# Topologically Sorted Source Nodes: [conv2d_1, d2, u2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# d2 => add, mul_2, rsqrt, sub, var_mean
# u2 => cat_1
# Graph fragment:
# %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_7, %view_1], 1), kwargs = {})
triton_per_fused__native_batch_norm_legit_cat_convolution_2 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
xnumel = 64
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_out_ptr0 + (r2 + (1024*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 1024, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 1024.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + (1024*x3)), tmp2, None)
tl.store(out_ptr2 + (r2 + (1024*x3)), tmp27, None)
tl.store(out_ptr3 + (r2 + (1024*x0) + (32768*x1)), tmp27, None)
tl.store(out_ptr4 + (x3), tmp26, None)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
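# Per-(sample, channel) statistics over the 32x32 plane, applied to the
# LeakyReLU(0.2) output: (y - mean) * rsqrt(var + 1e-5), i.e. instance
# normalization without affine parameters, with the result again duplicated
# into a concat buffer for the decoder skip. Hedged sketch of the norm step:
def _instance_norm(y, eps=1e-05):
    mean = y.mean(dim=(2, 3), keepdim=True)
    var = y.var(dim=(2, 3), unbiased=False, keepdim=True)
    return (y - mean) * (var + eps).rsqrt()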
# kernel path: runs/run_shard_7/inductor_cache/pw/cpwa7w2sf2mh2epi2372qi4mhvq3tcs5lmubo3et2g2uwsicveur.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vd/cvdopwqckc3jo2x7hgf6dv4sg6zplos7jdtq5olgwkjeeleblvyg.py
# Topologically Sorted Source Nodes: [conv2d_2, d3, u1], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# d3 => add_1, mul_4, rsqrt_1, sub_1, var_mean_1
# u1 => cat
# Graph fragment:
# %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %getitem_7), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_5, %view_3], 1), kwargs = {})
triton_per_fused__native_batch_norm_legit_cat_convolution_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_4(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
x1 = (xindex // 32)
tmp0 = tl.load(in_out_ptr0 + (r2 + (256*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 256, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 256.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + (256*x3)), tmp2, None)
tl.store(out_ptr2 + (r2 + (256*x3)), tmp27, None)
tl.store(out_ptr3 + (r2 + (256*x0) + (16384*x1)), tmp27, None)
tl.store(out_ptr4 + (x3), tmp26, None)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tu/ctuemxgrs2pnacvzflikr2y5c52xoe5mzmamo4to5oaym5b7ojwm.py
# Topologically Sorted Source Nodes: [encoder], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# encoder => getitem_9
# Graph fragment:
# %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lg/clgtx6ay7n33cnofxnacshd4bbemmwdtbqscisvkzfzgkphtzwc4.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_2 => add_2, add_3, convert_element_type, convert_element_type_1, iota, mul_5, mul_6
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_2, torch.float32), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_6, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_6 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_6(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
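# Index map for 2x nearest-neighbor upsampling: output coordinate i reads
# source coordinate int(i * 0.5). Hedged sketch of the same map:
def _nearest_src_index(i):
    return int(i * 0.5)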
# kernel path: runs/run_shard_7/inductor_cache/3h/c3h66ox44l5w5s3fhaj7s6yo63xi6wkaynvn7nr7icmd3gdqe4hv.py
# Topologically Sorted Source Nodes: [encoder, x_2], Original ATen: [aten.max_pool2d_with_indices, aten._unsafe_index]
# Source node to ATen node mapping:
# encoder => _low_memory_max_pool2d_with_offsets_2
# x_2 => _unsafe_index
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%view_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%getitem_8, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
triton_poi_fused__unsafe_index_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused__unsafe_index_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_max_pool2d_with_indices_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + ((2*tmp8) + (32*tmp4) + (256*x2)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (2*tmp8) + (32*tmp4) + (256*x2)), None, eviction_policy='evict_last')
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.load(in_ptr1 + (16 + (2*tmp8) + (32*tmp4) + (256*x2)), None, eviction_policy='evict_last')
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr1 + (17 + (2*tmp8) + (32*tmp4) + (256*x2)), None, eviction_policy='evict_last')
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x4), tmp15, None)
''', device_str='cuda')
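# Note on the kernel above: it fuses the final 2x2 max-pool ('encoder') with the
# gather of the following nearest-neighbor upsample, so the pooled 8x8 tensor's
# values are never materialized. Each 16x16 output element recomputes the max over
# the four inputs of its pooling window, addressed through the index map built by
# the previous kernel. Roughly, in eager terms (a sketch, reusing idx from above):
#
#     pooled = F.max_pool2d(d3, 2)            # (4, 32, 8, 8)
#     up = pooled[:, :, idx][:, :, :, idx]    # (4, 32, 16, 16)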
# kernel path: runs/run_shard_7/inductor_cache/kd/ckdkfeo3ingoatacw3zytowozgw3gmmh57efwjyfx4my6i33eiav.py
# Topologically Sorted Source Nodes: [x_3, x_5, u1], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
# Source node to ATen node mapping:
# u1 => cat
# x_3 => convolution_3
# x_5 => add_6, rsqrt_2, var_mean_2
# Graph fragment:
# %convolution_3 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_10, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_5, %view_3], 1), kwargs = {})
triton_per_fused__native_batch_norm_legit_cat_convolution_8 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_8(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
x1 = (xindex // 32)
tmp0 = tl.load(in_out_ptr0 + (r2 + (256*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 256, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 256.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + (256*x3)), tmp2, None)
tl.store(out_ptr2 + (r2 + (256*x0) + (16384*x1)), tmp27, None)
tl.store(out_ptr3 + (x3), tmp26, None)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
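# Note on the kernel above: this persistent reduction fuses the conv bias add,
# leaky_relu(0.2), and F.instance_norm (no affine parameters) into one pass per
# (batch, channel) row of 16*16 = 256 elements, and writes the normalized result
# directly into its slot of the 'u1' concat buffer. The per-row math, as a hedged
# eager sketch over a flat row x of 256 values:
#
#     mu = x.mean()
#     var = ((x - mu) ** 2).mean()            # biased variance (correction=0)
#     y = (x - mu) * torch.rsqrt(var + 1e-05)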
# kernel path: runs/run_shard_7/inductor_cache/os/cos64vjywkkhomceeqz24mjju7brp7wa43m5dr4ratpsntpe5pjx.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_6 => add_7, add_8, convert_element_type_4, convert_element_type_5, iota_2, mul_11, mul_12
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_7, torch.float32), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_12, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_9 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_9(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/au/cau3acfcpk7o4csir7onz4mhaaj3rsrhaf4n4hsv4y4tyngeqfcb.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# x_6 => _unsafe_index_1
# Graph fragment:
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {})
triton_poi_fused__unsafe_index_10 = async_compile.triton('triton_poi_fused__unsafe_index_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x2 = (xindex // 1024)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x2)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp9, None)
''', device_str='cuda')
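# Note on the kernel above: a plain nearest-neighbor upsample (16x16 -> 32x32) of
# the concatenated 'u1' tensor, expressed as a gather through the int64 index map.
# The tmp0 < 0 wrap-around branch is dead here, since the indices produced by the
# arange kernel are non-negative; it is the generic handling _unsafe_index emits
# for possibly-negative indices.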
# kernel path: runs/run_shard_7/inductor_cache/wg/cwgfgzz3d4qzmvkta5akesvlwc67vghvei7my24qvlnzamusnofy.py
# Topologically Sorted Source Nodes: [x_7, x_9, u2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
# Source node to ATen node mapping:
# u2 => cat_1
# x_7 => convolution_4
# x_9 => add_11, rsqrt_3, var_mean_3
# Graph fragment:
# %convolution_4 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_6, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_11,), kwargs = {})
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_7, %view_1], 1), kwargs = {})
triton_per_fused__native_batch_norm_legit_cat_convolution_11 = async_compile.triton('triton_per_fused__native_batch_norm_legit_cat_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_cat_convolution_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_11(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
xnumel = 64
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
x1 = (xindex // 16)
tmp0 = tl.load(in_out_ptr0 + (r2 + (1024*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 1024, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 1024.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + (1024*x3)), tmp2, None)
tl.store(out_ptr2 + (r2 + (1024*x0) + (32768*x1)), tmp27, None)
tl.store(out_ptr3 + (x3), tmp26, None)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wi/cwivvwhbvwrphtzexppvwxzqlr2mamv45fab7upa3wymzmylszsw.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_10 => add_12, add_13, convert_element_type_8, convert_element_type_9, iota_4, mul_17, mul_18
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_4, 1), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_17, 0), kwargs = {})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_12, torch.float32), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_13, 0.5), kwargs = {})
# %convert_element_type_9 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_18, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_12 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ai/caisjnzwrty3ekx4iyqud4hticx3kowaagajg7cbzrejqwcm7pua.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# x_10 => _unsafe_index_2
# Graph fragment:
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_1, [None, None, %unsqueeze_2, %convert_element_type_9]), kwargs = {})
triton_poi_fused__unsafe_index_13 = async_compile.triton('triton_poi_fused__unsafe_index_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x2 = (xindex // 4096)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (32*tmp4) + (1024*x2)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sf/csflzd5zwuhkkb6naan66y57g2tq4dk5nqpwcsnjmfjrby35k3rj.py
# Topologically Sorted Source Nodes: [x_11, x_13, u3], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
# Source node to ATen node mapping:
# u3 => cat_2
# x_11 => convolution_5
# x_13 => add_16, rsqrt_4, var_mean_4
# Graph fragment:
# %convolution_5 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_4 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_8, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_14, 1e-05), kwargs = {})
# %rsqrt_4 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_16,), kwargs = {})
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%view_9, %where], 1), kwargs = {})
triton_red_fused__native_batch_norm_legit_cat_convolution_14 = async_compile.triton('triton_red_fused__native_batch_norm_legit_cat_convolution_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[32, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_cat_convolution_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_cat_convolution_14(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 32
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 8
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp9_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp9_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp9_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp9_mean_next, tmp9_m2_next, tmp9_weight_next = triton_helpers.welford_reduce(
tmp8, tmp9_mean, tmp9_m2, tmp9_weight, roffset == 0
)
tmp9_mean = tl.where(rmask & xmask, tmp9_mean_next, tmp9_mean)
tmp9_m2 = tl.where(rmask & xmask, tmp9_m2_next, tmp9_m2)
tmp9_weight = tl.where(rmask & xmask, tmp9_weight_next, tmp9_weight)
tl.store(in_out_ptr0 + (r2 + (4096*x3)), tmp2, rmask & xmask)
tmp9_tmp, tmp10_tmp, tmp11_tmp = triton_helpers.welford(
tmp9_mean, tmp9_m2, tmp9_weight, 1
)
tmp9 = tmp9_tmp[:, None]
tmp10 = tmp10_tmp[:, None]
tmp11 = tmp11_tmp[:, None]
tl.store(out_ptr0 + (x3), tmp9, xmask)
x1 = (xindex // 8)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp12 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tmp17 - tmp9
tmp19 = 4096.0
tmp20 = tmp10 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tl.store(out_ptr2 + (r2 + (4096*x0) + (65536*x1)), tmp24, rmask & xmask)
tmp25 = 4096.0
tmp26 = tmp10 / tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.rsqrt(tmp28)
tl.store(out_ptr3 + (x3), tmp29, xmask)
''', device_str='cuda')
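# Note on the kernel above: unlike the earlier persistent reductions, each row here
# holds 64*64 = 4096 elements, so inductor emits a looped (two-pass) reduction. The
# first loop streams the row once, applying bias + leaky_relu and accumulating
# running mean/m2/weight with Welford's algorithm (numerically stabler than a naive
# sum of squares); the second loop re-reads the row to apply the normalization.
# The classic scalar Welford step, for reference (a sketch; the blocked form lives
# in torch._inductor.runtime.triton_helpers.welford_reduce):
#
#     count += 1
#     delta = x - mean
#     mean += delta / count
#     m2 += delta * (x - mean)
#     var = m2 / count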
# kernel path: runs/run_shard_7/inductor_cache/yf/cyfp4kh4hal3hsl5hetkw6bnbhjxjpexovh66lmh2567uhdou3jb.py
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# x_14 => convolution_6
# x_15 => tanh
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_tanh_15 = async_compile.triton('triton_poi_fused_convolution_tanh_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
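# Note on the kernel above: the epilogue of the output convolution. It adds the
# per-channel bias and applies tanh in place on the conv result, i.e.
# x = torch.tanh(conv_out + bias), matching the F.tanh(self.output(u3)) tail of
# the source module reproduced later in this file.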
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (8, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (16, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (16, ), (1, ))
assert_size_stride(primals_12, (8, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (8, ), (1, ))
assert_size_stride(primals_14, (3, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_15, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.float32)
buf54 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.float32)
buf53 = reinterpret_tensor(buf54, (4, 8, 64, 64), (65536, 4096, 64, 1), 32768) # alias
# Topologically Sorted Source Nodes: [conv2d, d1, u3], Original ATen: [aten.convolution, aten.leaky_relu, aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, buf53, 131072, grid=grid(131072), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.float32)
buf4 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf2, buf3, buf4, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf6 = buf5; del buf5 # reuse
buf7 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf11 = empty_strided_cuda((1, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf43 = reinterpret_tensor(buf0, (4, 32, 32, 32), (32768, 1024, 32, 1), 0); del buf0 # reuse
buf42 = reinterpret_tensor(buf43, (4, 16, 32, 32), (32768, 1024, 32, 1), 16384) # alias
buf10 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, d2, u2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_convolution_2.run(buf6, primals_5, buf7, buf11, buf42, buf10, 64, 1024, grid=grid(64), stream=stream0)
del primals_5
buf12 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.float32)
buf13 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf11, buf12, buf13, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf12, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 32, 16, 16), (8192, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
buf16 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf20 = empty_strided_cuda((1, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
buf32 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
buf31 = reinterpret_tensor(buf32, (4, 32, 16, 16), (16384, 256, 16, 1), 8192) # alias
buf19 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, d3, u1], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_convolution_4.run(buf15, primals_7, buf16, buf20, buf31, buf19, 128, 256, grid=grid(128), stream=stream0)
del primals_7
buf21 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [encoder], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf20, buf21, 8192, grid=grid(8192), stream=stream0)
buf22 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_6.run(buf22, 16, grid=grid(16), stream=stream0)
buf23 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [encoder, x_2], Original ATen: [aten.max_pool2d_with_indices, aten._unsafe_index]
triton_poi_fused__unsafe_index_max_pool2d_with_indices_7.run(buf22, buf20, buf23, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 16, 16), (8192, 256, 16, 1))
buf25 = buf24; del buf24 # reuse
buf26 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf30 = reinterpret_tensor(buf32, (4, 32, 16, 16), (16384, 256, 16, 1), 0) # alias
buf29 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_5, u1], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_convolution_8.run(buf25, primals_9, buf26, buf30, buf29, 128, 256, grid=grid(128), stream=stream0)
del primals_9
buf33 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_9.run(buf33, 32, grid=grid(32), stream=stream0)
del buf30
del buf31
buf34 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._unsafe_index]
triton_poi_fused__unsafe_index_10.run(buf33, buf32, buf34, 262144, grid=grid(262144), stream=stream0)
del buf32
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf36 = buf35; del buf35 # reuse
buf37 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf41 = reinterpret_tensor(buf43, (4, 16, 32, 32), (32768, 1024, 32, 1), 0) # alias
buf40 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_7, x_9, u2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
triton_per_fused__native_batch_norm_legit_cat_convolution_11.run(buf36, primals_11, buf37, buf41, buf40, 64, 1024, grid=grid(64), stream=stream0)
del primals_11
buf44 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_12.run(buf44, 64, grid=grid(64), stream=stream0)
del buf41
del buf42
buf45 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten._unsafe_index]
triton_poi_fused__unsafe_index_13.run(buf44, buf43, buf45, 524288, grid=grid(524288), stream=stream0)
del buf43
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf47 = buf46; del buf46 # reuse
buf48 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
buf52 = reinterpret_tensor(buf54, (4, 8, 64, 64), (65536, 4096, 64, 1), 0) # alias
buf51 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32)
# Topologically Sorted Source Nodes: [x_11, x_13, u3], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.cat]
triton_red_fused__native_batch_norm_legit_cat_convolution_14.run(buf47, primals_13, buf48, buf52, buf51, 32, 4096, grid=grid(32), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf55 = extern_kernels.convolution(buf54, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf56 = buf55; del buf55 # reuse
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_15.run(buf56, primals_15, 49152, grid=grid(49152), stream=stream0)
del primals_15
return (buf56, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf2, buf3, buf4, buf6, reinterpret_tensor(buf10, (64, ), (1, ), 0), reinterpret_tensor(buf11, (4, 16, 32, 32), (16384, 1024, 32, 1), 0), buf12, buf13, buf15, reinterpret_tensor(buf19, (128, ), (1, ), 0), reinterpret_tensor(buf20, (4, 32, 16, 16), (8192, 256, 16, 1), 0), buf21, buf22, buf23, buf25, reinterpret_tensor(buf29, (128, ), (1, ), 0), buf33, buf34, buf36, reinterpret_tensor(buf40, (64, ), (1, ), 0), buf44, buf45, buf47, reinterpret_tensor(buf51, (32, ), (1, ), 0), buf54, buf56, reinterpret_tensor(buf48, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf37, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf26, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf16, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf7, (1, 64, 1, 1), (64, 1, 1, 1), 0), )
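# Note on the return value of call(): the first element (buf56) is the network
# output; the remaining tensors are the inputs, intermediate activations, and
# normalization statistics that this '0_forward' graph keeps alive for the
# corresponding backward pass under AOTAutograd.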
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((8, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((3, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
# Source PyTorch module (eager-mode reference for the compiled graph above):
import torch
from torch import nn
import torch.nn.functional as F
class gen_ab_cf(nn.Module):
def __init__(self):
super().__init__()
self.d1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3,
stride=1, padding=1)
self.d2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3,
stride=1, padding=1)
self.d3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3,
stride=1, padding=1)
self.enmaxpool = nn.MaxPool2d(2)
self.u1 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3,
padding=1)
self.u2 = nn.Conv2d(in_channels=64, out_channels=16, kernel_size=3,
padding=1)
self.u3 = nn.Conv2d(in_channels=32, out_channels=8, kernel_size=3,
padding=1)
self.up1 = nn.Upsample(scale_factor=2)
self.output = nn.Conv2d(in_channels=16, out_channels=3, kernel_size
=3, padding=1)
def forward(self, x):
d1 = F.leaky_relu(self.d1(x), 0.2)
x = F.max_pool2d(d1, 2)
d2 = F.instance_norm(F.leaky_relu(self.d2(x), 0.2))
x = F.max_pool2d(d2, 2)
d3 = F.instance_norm(F.leaky_relu(self.d3(x), 0.2))
encoder = self.enmaxpool(d3)
x = self.up1(encoder)
x = self.u1(x)
x = F.leaky_relu(x, 0.2)
x = F.instance_norm(x)
u1 = torch.cat((x, d3), 1)
x = self.up1(u1)
x = self.u2(x)
x = F.leaky_relu(x, 0.2)
x = F.instance_norm(x)
u2 = torch.cat((x, d2), 1)
x = self.up1(u2)
x = self.u3(x)
x = F.leaky_relu(x, 0.2)
x = F.instance_norm(x)
u3 = torch.cat((x, d1), 1)
x = self.output(u3)
x = F.tanh(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
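# Hedged usage sketch for the eager module above (illustrative only; the device
# placement is an assumption):
#
#     model = gen_ab_cf().cuda()
#     out = model(torch.rand(4, 3, 64, 64, device='cuda'))
#     # out: (4, 3, 64, 64), values in (-1, 1) from the tanh output layer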
# Standalone rendering of the compiled module (same kernels, inductor metadata stripped):
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_convolution_leaky_relu_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x1 = xindex // 4096 % 8
x2 = xindex // 32768
x3 = xindex % 32768
tmp0 = tl.load(in_ptr0 + x4, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x4, tmp4, None)
tl.store(out_ptr1 + x4, tmp7, None)
tl.store(out_ptr2 + (x3 + 65536 * x2), tmp7, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_2(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 1024, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 1024.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None)
tl.store(out_ptr2 + (r2 + 1024 * x3), tmp27, None)
tl.store(out_ptr3 + (r2 + 1024 * x0 + 32768 * x1), tmp27, None)
tl.store(out_ptr4 + x3, tmp26, None)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_4(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
x1 = xindex // 32
tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 256, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 256.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None)
tl.store(out_ptr2 + (r2 + 256 * x3), tmp27, None)
tl.store(out_ptr3 + (r2 + 256 * x0 + 16384 * x1), tmp27, None)
tl.store(out_ptr4 + x3, tmp26, None)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_6(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_max_pool2d_with_indices_7(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (2 * tmp8 + 32 * tmp4 + 256 * x2), None,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 2 * tmp8 + 32 * tmp4 + 256 * x2), None,
eviction_policy='evict_last')
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tl.load(in_ptr1 + (16 + 2 * tmp8 + 32 * tmp4 + 256 * x2), None,
eviction_policy='evict_last')
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr1 + (17 + 2 * tmp8 + 32 * tmp4 + 256 * x2), None,
eviction_policy='evict_last')
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x4, tmp15, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_8(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 32
x1 = xindex // 32
tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 256, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 256.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None)
tl.store(out_ptr2 + (r2 + 256 * x0 + 16384 * x1), tmp27, None)
tl.store(out_ptr3 + x3, tmp26, None)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_9(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_10(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x2 = xindex // 1024
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_cat_convolution_11(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.full([1], 1024, tl.int32)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp12 / tmp14
tmp16 = tmp8 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tmp7 - tmp15
tmp22 = 1024.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None)
tl.store(out_ptr2 + (r2 + 1024 * x0 + 32768 * x1), tmp27, None)
tl.store(out_ptr3 + x3, tmp26, None)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_12(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_13(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_cat_convolution_14(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 32
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 8
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp9_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp9_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp9_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp9_mean_next, tmp9_m2_next, tmp9_weight_next = (triton_helpers.
welford_reduce(tmp8, tmp9_mean, tmp9_m2, tmp9_weight, roffset == 0)
)
tmp9_mean = tl.where(rmask & xmask, tmp9_mean_next, tmp9_mean)
tmp9_m2 = tl.where(rmask & xmask, tmp9_m2_next, tmp9_m2)
tmp9_weight = tl.where(rmask & xmask, tmp9_weight_next, tmp9_weight)
tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask)
tmp9_tmp, tmp10_tmp, tmp11_tmp = triton_helpers.welford(tmp9_mean,
tmp9_m2, tmp9_weight, 1)
tmp9 = tmp9_tmp[:, None]
tmp10 = tmp10_tmp[:, None]
tmp11_tmp[:, None]
tl.store(out_ptr0 + x3, tmp9, xmask)
x1 = xindex // 8
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp12 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tmp17 - tmp9
tmp19 = 4096.0
tmp20 = tmp10 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tl.store(out_ptr2 + (r2 + 4096 * x0 + 65536 * x1), tmp24, rmask & xmask
)
tmp25 = 4096.0
tmp26 = tmp10 / tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.rsqrt(tmp28)
tl.store(out_ptr3 + x3, tmp29, xmask)
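# With 4096 elements per (N, C) plane, this reduction streams the data in
# RBLOCK-sized chunks, accumulating (mean, M2, weight) triples via Welford's
# algorithm, then runs a second loop to apply the normalization. One scalar
# Welford update step, as a sketch:
#   count += 1
#   delta = x - mean
#   mean += delta / count
#   m2 += delta * (x - mean)      # variance = m2 / count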
@triton.jit
def triton_poi_fused_convolution_tanh_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
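# Final activation: per-channel bias add followed by tanh, written in place.
# A hedged eager equivalent (bias broadcast over the 3 output channels):
#   out = torch.tanh(conv_out + bias.view(1, 3, 1, 1))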
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (8, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (16, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (8, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (8,), (1,))
assert_size_stride(primals_14, (3, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_15, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1),
torch.float32)
buf54 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.float32)
buf53 = reinterpret_tensor(buf54, (4, 8, 64, 64), (65536, 4096, 64,
1), 32768)
get_raw_stream(0)
triton_poi_fused_cat_convolution_leaky_relu_0[grid(131072)](buf0,
primals_2, buf1, buf2, buf53, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1),
torch.float32)
buf4 = empty_strided_cuda((4, 8, 32, 32), (8192, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(32768)](buf2, buf3,
buf4, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32
)
buf11 = empty_strided_cuda((1, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf43 = reinterpret_tensor(buf0, (4, 32, 32, 32), (32768, 1024, 32,
1), 0)
del buf0
buf42 = reinterpret_tensor(buf43, (4, 16, 32, 32), (32768, 1024, 32,
1), 16384)
buf10 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_cat_convolution_2[grid(64)](
buf6, primals_5, buf7, buf11, buf42, buf10, 64, 1024, num_warps
=8, num_stages=1)
del primals_5
buf12 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.float32)
buf13 = empty_strided_cuda((4, 16, 16, 16), (4096, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf11,
buf12, buf13, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf12, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 32, 16, 16), (8192, 256, 16, 1))
buf15 = buf14
del buf14
buf16 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf20 = empty_strided_cuda((1, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
buf32 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
buf31 = reinterpret_tensor(buf32, (4, 32, 16, 16), (16384, 256, 16,
1), 8192)
buf19 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_cat_convolution_4[grid(128)](
buf15, primals_7, buf16, buf20, buf31, buf19, 128, 256,
num_warps=2, num_stages=1)
del primals_7
buf21 = empty_strided_cuda((4, 32, 8, 8), (2048, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(8192)](buf20, buf21,
8192, XBLOCK=128, num_warps=4, num_stages=1)
buf22 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_6[grid(16)](buf22, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
triton_poi_fused__unsafe_index_max_pool2d_with_indices_7[grid(32768)](
buf22, buf20, buf23, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf24 = extern_kernels.convolution(buf23, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 16, 16), (8192, 256, 16, 1))
buf25 = buf24
del buf24
buf26 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf30 = reinterpret_tensor(buf32, (4, 32, 16, 16), (16384, 256, 16,
1), 0)
buf29 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_cat_convolution_8[grid(128)](
buf25, primals_9, buf26, buf30, buf29, 128, 256, num_warps=2,
num_stages=1)
del primals_9
buf33 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_9[grid(32)](buf33, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del buf30
del buf31
buf34 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
triton_poi_fused__unsafe_index_10[grid(262144)](buf33, buf32, buf34,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del buf32
buf35 = extern_kernels.convolution(buf34, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf36 = buf35
del buf35
buf37 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf41 = reinterpret_tensor(buf43, (4, 16, 32, 32), (32768, 1024, 32,
1), 0)
buf40 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_per_fused__native_batch_norm_legit_cat_convolution_11[grid(64)](
buf36, primals_11, buf37, buf41, buf40, 64, 1024, num_warps=8,
num_stages=1)
del primals_11
buf44 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_12[grid(64)](buf44, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf41
del buf42
buf45 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
triton_poi_fused__unsafe_index_13[grid(524288)](buf44, buf43, buf45,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del buf43
buf46 = extern_kernels.convolution(buf45, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf47 = buf46
del buf46
buf48 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
buf52 = reinterpret_tensor(buf54, (4, 8, 64, 64), (65536, 4096, 64,
1), 0)
buf51 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.
float32)
triton_red_fused__native_batch_norm_legit_cat_convolution_14[grid(32)](
buf47, primals_13, buf48, buf52, buf51, 32, 4096, XBLOCK=1,
RBLOCK=2048, num_warps=16, num_stages=1)
del primals_13
buf55 = extern_kernels.convolution(buf54, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf56 = buf55
del buf55
triton_poi_fused_convolution_tanh_15[grid(49152)](buf56, primals_15,
49152, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
return (buf56, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf2, buf3, buf4, buf6,
reinterpret_tensor(buf10, (64,), (1,), 0), reinterpret_tensor(buf11,
(4, 16, 32, 32), (16384, 1024, 32, 1), 0), buf12, buf13, buf15,
reinterpret_tensor(buf19, (128,), (1,), 0), reinterpret_tensor(
buf20, (4, 32, 16, 16), (8192, 256, 16, 1), 0), buf21, buf22, buf23,
buf25, reinterpret_tensor(buf29, (128,), (1,), 0), buf33, buf34,
buf36, reinterpret_tensor(buf40, (64,), (1,), 0), buf44, buf45,
buf47, reinterpret_tensor(buf51, (32,), (1,), 0), buf54, buf56,
reinterpret_tensor(buf48, (1, 32, 1, 1), (32, 1, 1, 1), 0),
reinterpret_tensor(buf37, (1, 64, 1, 1), (64, 1, 1, 1), 0),
reinterpret_tensor(buf26, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf16, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf7, (1, 64, 1, 1), (64, 1, 1, 1), 0))
class gen_ab_cfNew(nn.Module):
def __init__(self):
super().__init__()
self.d1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3,
stride=1, padding=1)
self.d2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3,
stride=1, padding=1)
self.d3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3,
stride=1, padding=1)
self.enmaxpool = nn.MaxPool2d(2)
self.u1 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3,
padding=1)
self.u2 = nn.Conv2d(in_channels=64, out_channels=16, kernel_size=3,
padding=1)
self.u3 = nn.Conv2d(in_channels=32, out_channels=8, kernel_size=3,
padding=1)
self.up1 = nn.Upsample(scale_factor=2)
self.output = nn.Conv2d(in_channels=16, out_channels=3, kernel_size
=3, padding=1)
def forward(self, input_0):
primals_1 = self.d1.weight
primals_2 = self.d1.bias
primals_4 = self.d2.weight
primals_5 = self.d2.bias
primals_6 = self.d3.weight
primals_7 = self.d3.bias
primals_8 = self.u1.weight
primals_9 = self.u1.bias
primals_10 = self.u2.weight
primals_11 = self.u2.bias
primals_12 = self.u3.weight
primals_13 = self.u3.bias
primals_14 = self.output.weight
primals_15 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| layel2/layyer-lib | gen_ab_cf | false | 3,884 | [
"MIT"
] | 0 | db48b5c38098ee93d2d34693d98e5ef4d319d919 | https://github.com/layel2/layyer-lib/tree/db48b5c38098ee93d2d34693d98e5ef4d319d919 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.d1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3,
stride=1, padding=1)
self.d2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3,
stride=1, padding=1)
self.d3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3,
stride=1, padding=1)
self.enmaxpool = nn.MaxPool2d(2)
self.u1 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3,
padding=1)
self.u2 = nn.Conv2d(in_channels=64, out_channels=16, kernel_size=3,
padding=1)
self.u3 = nn.Conv2d(in_channels=32, out_channels=8, kernel_size=3,
padding=1)
self.up1 = nn.Upsample(scale_factor=2)
self.output = nn.Conv2d(in_channels=16, out_channels=3, kernel_size
=3, padding=1)
def forward(self, x):
d1 = F.leaky_relu(self.d1(x), 0.2)
x = F.max_pool2d(d1, 2)
d2 = F.instance_norm(F.leaky_relu(self.d2(x), 0.2))
x = F.max_pool2d(d2, 2)
d3 = F.instance_norm(F.leaky_relu(self.d3(x), 0.2))
encoder = self.enmaxpool(d3)
x = self.up1(encoder)
x = self.u1(x)
x = F.leaky_relu(x, 0.2)
x = F.instance_norm(x)
u1 = torch.cat((x, d3), 1)
x = self.up1(u1)
x = self.u2(x)
x = F.leaky_relu(x, 0.2)
x = F.instance_norm(x)
u2 = torch.cat((x, d2), 1)
x = self.up1(u2)
x = self.u3(x)
x = F.leaky_relu(x, 0.2)
x = F.instance_norm(x)
u3 = torch.cat((x, d1), 1)
x = self.output(u3)
x = F.tanh(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
MultiLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pu/cpu5hzfefz7limmmrr6spzanoriiictxlg4uggegovs7kp7pvgje.py
# Topologically Sorted Source Nodes: [type_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# type_1 => convert_element_type
# Graph fragment:
# %convert_element_type : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%select, torch.int64), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp1 = tmp0.to(tl.int64)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
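# The offset 16 + x0 walks row 4 of the (5, 4) float `info` tensor (row
# stride 4), i.e. info[self.key] with key == 4, and casts the four head ids
# to int64. A hedged eager equivalent:
#   head_ids = info[4].to(torch.int64)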
# kernel path: runs/run_shard_7/inductor_cache/qq/cqqe2erb265w5u627lkj2yyybu3awp7yhcnwna76rd6eo3g3psq6.py
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten.index]
# Source node to ATen node mapping:
# weights => index
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_1, [%convert_element_type]), kwargs = {})
triton_poi_fused_index_1 = async_compile.triton('triton_poi_fused_index_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (x0 + (16*tmp4)), xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
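# Gather of one (4, 4) weight matrix per sample: equivalent to
# weights[head_ids], with negative ids wrapped into [0, n_heads) exactly as
# Python indexing would do. A hedged eager sketch:
#   w = weights[head_ids]         # (4, 4, 4): one matrix per sample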
# kernel path: runs/run_shard_7/inductor_cache/c2/cc2gtouifho62qi2kfmn6og6tvij7wy3oi6aqj27a7mmu7yh3nrj.py
# Topologically Sorted Source Nodes: [biases, add], Original ATen: [aten.index, aten.add]
# Source node to ATen node mapping:
# add => add
# biases => index_1
# Graph fragment:
# %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_3, [%convert_element_type]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %index_1), kwargs = {})
triton_poi_fused_add_index_2 = async_compile.triton('triton_poi_fused_add_index_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_index_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert(((0 <= tmp5) & (tmp5 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp5 < 4")
tmp7 = tl.load(in_ptr1 + (x0 + (4*tmp5)), xmask)
tmp8 = tmp0 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (5, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [type_1], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(primals_2, buf0, 4, grid=grid(4), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten.index]
triton_poi_fused_index_1.run(buf0, primals_1, buf1, 64, grid=grid(64), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weights, bmm], Original ATen: [aten.index, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_4, (4, 1, 4), (4, 4, 1), 0), buf1, out=buf2)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [biases, add], Original ATen: [aten.index, aten.add]
triton_poi_fused_add_index_2.run(buf3, buf0, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf3, buf0, reinterpret_tensor(primals_4, (4, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((5, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import argparse
import numpy as np
import torch.nn as nn
def tensor(x, dtype=torch.float32):
if torch.is_tensor(x):
return x.type(dtype)
x = torch.tensor(x, device=Config.DEVICE, dtype=dtype)
return x
def batch_linear(input, weight, bias=None):
""" input: (N, D), weight: (N, D, H), bias: (N, H) """
if bias is not None:
return torch.bmm(input.unsqueeze(1), weight).squeeze(1) + bias
else:
return torch.bmm(input.unsqueeze(1), weight).squeeze(1)
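# Shape sketch for batch_linear (illustrative values, not part of the module):
# with N=2, D=3, H=5 the bmm contracts (N, 1, D) @ (N, D, H) -> (N, 1, H),
# and squeeze(1) restores (N, H) before the bias add.
#   x = torch.rand(2, 3); w = torch.rand(2, 3, 5); b = torch.zeros(2, 5)
#   assert batch_linear(x, w, b).shape == (2, 5)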
def weight_init(weight, w_scale=1.0):
init_f = nn.init.orthogonal_
init_f(weight.data)
weight.data.mul_(w_scale)
return weight
class BaseNormalizer:
def __init__(self, read_only=False):
self.read_only = read_only
def set_read_only(self):
self.read_only = True
def unset_read_only(self):
self.read_only = False
def state_dict(self):
return None
def load_state_dict(self, _):
return
class RescaleNormalizer(BaseNormalizer):
def __init__(self, coef=1.0):
BaseNormalizer.__init__(self)
self.coef = coef
def __call__(self, x):
x = np.asarray(x)
return self.coef * x
class Config:
DEVICE = torch.device('cpu')
def __init__(self):
self.parser = argparse.ArgumentParser()
self.task_fn = None
self.optimizer_fn = None
self.actor_optimizer_fn = None
self.critic_optimizer_fn = None
self.network_fn = None
self.actor_network_fn = None
self.critic_network_fn = None
self.replay_fn = None
self.random_process_fn = None
self.discount = None
self.target_network_update_freq = None
self.exploration_steps = None
self.logger = None
self.history_length = None
self.double_q = False
self.tag = 'vanilla'
self.num_workers = 1
self.gradient_clip = None
self.entropy_weight = 0
self.use_gae = False
self.gae_tau = 1.0
self.target_network_mix = 0.001
self.state_normalizer = RescaleNormalizer()
self.reward_normalizer = RescaleNormalizer()
self.min_memory_size = None
self.max_steps = 0
self.rollout_length = None
self.value_loss_weight = 1.0
self.iteration_log_interval = 30
self.categorical_v_min = None
self.categorical_v_max = None
self.categorical_n_atoms = 51
self.num_quantiles = None
self.optimization_epochs = 4
self.mini_batch_size = 64
self.termination_regularizer = 0
self.sgd_update_frequency = None
self.random_action_prob = None
self.__eval_env = None
self.log_interval = int(1000.0)
self.save_interval = 0
self.eval_interval = 0
self.eval_episodes = 10
self.async_actor = True
self.abs_dim = 512
@property
def eval_env(self):
return self.__eval_env
@eval_env.setter
def eval_env(self, env):
self.__eval_env = env
self.state_dim = env.state_dim
self.action_dim = env.action_dim
self.task_name = env.name
def add_argument(self, *args, **kwargs):
self.parser.add_argument(*args, **kwargs)
def merge(self, config_dict=None):
if config_dict is None:
args = self.parser.parse_args()
config_dict = args.__dict__
for key in config_dict.keys():
setattr(self, key, config_dict[key])
class MultiLinear(nn.Module):
def __init__(self, input_dim, output_dim, n_heads, key, w_scale=1.0):
super().__init__()
self.weights = nn.Parameter(weight_init(torch.randn(n_heads,
input_dim, output_dim), w_scale=w_scale))
self.biases = nn.Parameter(torch.zeros(n_heads, output_dim))
self.key = key
def forward(self, inputs, info):
weights = self.weights[tensor(info[self.key], torch.int64), :, :]
biases = self.biases[tensor(info[self.key], torch.int64), :]
return batch_linear(inputs, weight=weights, bias=biases)
def get_weight(self, info):
return self.weights[tensor(info[self.key], torch.int64), :, :]
def load_weight(self, weight_dict):
for i, weight in weight_dict.items():
self.weights.data[i] = weight
def get_inputs():
return [torch.rand([4, 4]), torch.rand([5, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'n_heads': 4, 'key': 4}]
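# Hedged usage sketch for MultiLinear (names and values are illustrative):
#   layer = MultiLinear(input_dim=4, output_dim=4, n_heads=4, key=4)
#   inputs, info = torch.rand(4, 4), torch.rand(5, 4)
#   out = layer(inputs, info)     # info[4] picks one weight head per sample
#   assert out.shape == (4, 4)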
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import argparse
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp1 = tmp0.to(tl.int64)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 16 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_index_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask,
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr1 + (x0 + 4 * tmp5), xmask)
tmp8 = tmp0 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (5, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(4)](primals_2, buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_index_1[grid(64)](buf0, primals_1, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (4, 1, 4), (4, 4,
1), 0), buf1, out=buf2)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
triton_poi_fused_add_index_2[grid(16)](buf3, buf0, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf3, buf0, reinterpret_tensor(primals_4, (4, 4, 1), (4, 1, 4), 0)
def tensor(x, dtype=torch.float32):
if torch.is_tensor(x):
return x.type(dtype)
x = torch.tensor(x, device=Config.DEVICE, dtype=dtype)
return x
def batch_linear(input, weight, bias=None):
""" input: (N, D), weight: (N, D, H), bias: (N, H) """
if bias is not None:
return torch.bmm(input.unsqueeze(1), weight).squeeze(1) + bias
else:
return torch.bmm(input.unsqueeze(1), weight).squeeze(1)
def weight_init(weight, w_scale=1.0):
init_f = nn.init.orthogonal_
init_f(weight.data)
weight.data.mul_(w_scale)
return weight
class BaseNormalizer:
def __init__(self, read_only=False):
self.read_only = read_only
def set_read_only(self):
self.read_only = True
def unset_read_only(self):
self.read_only = False
def state_dict(self):
return None
def load_state_dict(self, _):
return
class RescaleNormalizer(BaseNormalizer):
def __init__(self, coef=1.0):
BaseNormalizer.__init__(self)
self.coef = coef
def __call__(self, x):
x = np.asarray(x)
return self.coef * x
class Config:
DEVICE = torch.device('cpu')
def __init__(self):
self.parser = argparse.ArgumentParser()
self.task_fn = None
self.optimizer_fn = None
self.actor_optimizer_fn = None
self.critic_optimizer_fn = None
self.network_fn = None
self.actor_network_fn = None
self.critic_network_fn = None
self.replay_fn = None
self.random_process_fn = None
self.discount = None
self.target_network_update_freq = None
self.exploration_steps = None
self.logger = None
self.history_length = None
self.double_q = False
self.tag = 'vanilla'
self.num_workers = 1
self.gradient_clip = None
self.entropy_weight = 0
self.use_gae = False
self.gae_tau = 1.0
self.target_network_mix = 0.001
self.state_normalizer = RescaleNormalizer()
self.reward_normalizer = RescaleNormalizer()
self.min_memory_size = None
self.max_steps = 0
self.rollout_length = None
self.value_loss_weight = 1.0
self.iteration_log_interval = 30
self.categorical_v_min = None
self.categorical_v_max = None
self.categorical_n_atoms = 51
self.num_quantiles = None
self.optimization_epochs = 4
self.mini_batch_size = 64
self.termination_regularizer = 0
self.sgd_update_frequency = None
self.random_action_prob = None
self.__eval_env = None
self.log_interval = int(1000.0)
self.save_interval = 0
self.eval_interval = 0
self.eval_episodes = 10
self.async_actor = True
self.abs_dim = 512
@property
def eval_env(self):
return self.__eval_env
@eval_env.setter
def eval_env(self, env):
self.__eval_env = env
self.state_dim = env.state_dim
self.action_dim = env.action_dim
self.task_name = env.name
def add_argument(self, *args, **kwargs):
self.parser.add_argument(*args, **kwargs)
def merge(self, config_dict=None):
if config_dict is None:
args = self.parser.parse_args()
config_dict = args.__dict__
for key in config_dict.keys():
setattr(self, key, config_dict[key])
class MultiLinearNew(nn.Module):
def __init__(self, input_dim, output_dim, n_heads, key, w_scale=1.0):
super().__init__()
self.weights = nn.Parameter(weight_init(torch.randn(n_heads,
input_dim, output_dim), w_scale=w_scale))
self.biases = nn.Parameter(torch.zeros(n_heads, output_dim))
self.key = key
def get_weight(self, info):
return self.weights[tensor(info[self.key], torch.int64), :, :]
def load_weight(self, weight_dict):
for i, weight in weight_dict.items():
self.weights.data[i] = weight
def forward(self, input_0, input_1):
primals_1 = self.weights
primals_3 = self.biases
primals_4 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| lchenat/TSA | MultiLinear | false | 3,885 | [
"Apache-2.0"
] | 0 | 661266ba16e06f63962b306a7c30d25f37920c2d | https://github.com/lchenat/TSA/tree/661266ba16e06f63962b306a7c30d25f37920c2d | import torch
import argparse
import numpy as np
import torch.nn as nn
def tensor(x, dtype=torch.float32):
if torch.is_tensor(x):
return x.type(dtype)
x = torch.tensor(x, device=Config.DEVICE, dtype=dtype)
return x
def batch_linear(input, weight, bias=None):
""" input: (N, D), weight: (N, D, H), bias: (N, H) """
if bias is not None:
return torch.bmm(input.unsqueeze(1), weight).squeeze(1) + bias
else:
return torch.bmm(input.unsqueeze(1), weight).squeeze(1)
def weight_init(weight, w_scale=1.0):
init_f = nn.init.orthogonal_
init_f(weight.data)
weight.data.mul_(w_scale)
return weight
class BaseNormalizer:
def __init__(self, read_only=False):
self.read_only = read_only
def set_read_only(self):
self.read_only = True
def unset_read_only(self):
self.read_only = False
def state_dict(self):
return None
def load_state_dict(self, _):
return
class RescaleNormalizer(BaseNormalizer):
def __init__(self, coef=1.0):
BaseNormalizer.__init__(self)
self.coef = coef
def __call__(self, x):
x = np.asarray(x)
return self.coef * x
class Config:
DEVICE = torch.device('cpu')
def __init__(self):
self.parser = argparse.ArgumentParser()
self.task_fn = None
self.optimizer_fn = None
self.actor_optimizer_fn = None
self.critic_optimizer_fn = None
self.network_fn = None
self.actor_network_fn = None
self.critic_network_fn = None
self.replay_fn = None
self.random_process_fn = None
self.discount = None
self.target_network_update_freq = None
self.exploration_steps = None
self.logger = None
self.history_length = None
self.double_q = False
self.tag = 'vanilla'
self.num_workers = 1
self.gradient_clip = None
self.entropy_weight = 0
self.use_gae = False
self.gae_tau = 1.0
self.target_network_mix = 0.001
self.state_normalizer = RescaleNormalizer()
self.reward_normalizer = RescaleNormalizer()
self.min_memory_size = None
self.max_steps = 0
self.rollout_length = None
self.value_loss_weight = 1.0
self.iteration_log_interval = 30
self.categorical_v_min = None
self.categorical_v_max = None
self.categorical_n_atoms = 51
self.num_quantiles = None
self.optimization_epochs = 4
self.mini_batch_size = 64
self.termination_regularizer = 0
self.sgd_update_frequency = None
self.random_action_prob = None
self.__eval_env = None
self.log_interval = int(1000.0)
self.save_interval = 0
self.eval_interval = 0
self.eval_episodes = 10
self.async_actor = True
self.abs_dim = 512
@property
def eval_env(self):
return self.__eval_env
@eval_env.setter
def eval_env(self, env):
self.__eval_env = env
self.state_dim = env.state_dim
self.action_dim = env.action_dim
self.task_name = env.name
def add_argument(self, *args, **kwargs):
self.parser.add_argument(*args, **kwargs)
def merge(self, config_dict=None):
if config_dict is None:
args = self.parser.parse_args()
config_dict = args.__dict__
for key in config_dict.keys():
setattr(self, key, config_dict[key])
class Model(nn.Module):
def __init__(self, input_dim, output_dim, n_heads, key, w_scale=1.0):
super().__init__()
self.weights = nn.Parameter(weight_init(torch.randn(n_heads,
input_dim, output_dim), w_scale=w_scale))
self.biases = nn.Parameter(torch.zeros(n_heads, output_dim))
self.key = key
def forward(self, inputs, info):
weights = self.weights[tensor(info[self.key], torch.int64), :, :]
biases = self.biases[tensor(info[self.key], torch.int64), :]
return batch_linear(inputs, weight=weights, bias=biases)
# ... truncated (>4000 chars) for memory efficiency |
TransformerBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ey/ceyozlsoonc7o42eynjdzjbq7ix4xpja3bd7eoeely6ypzzem2a4.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [0, 1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_per_fused_native_layer_norm_0 = async_compile.triton('triton_per_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_layer_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp21 = tl.load(in_ptr1 + (r0), None)
tmp23 = tl.load(in_ptr2 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 16.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 * tmp18
tmp22 = tmp20 * tmp21
tmp24 = tmp22 + tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None)
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp24, None)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
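# The fused kernel normalizes all 16 elements of the (4, 4) input in one
# pass, i.e. LayerNorm with a biased variance estimate:
#   y = (x - x.mean()) * torch.rsqrt(x.var(unbiased=False) + 1e-05) * w + b
# (a hedged eager sketch; w and b are the elementwise affine parameters).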
# kernel path: runs/run_shard_7/inductor_cache/ol/colbiyeeegfdyyzeckjnylgg3xt3rkh3aadcz7fjtfx5472nedsg.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_2
# Graph fragment:
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hg/chg3iq6bscxmmxv5f7tuzgwycb4mgrimwfhv2nauw5rj4tt5cmv2.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zu/czuvep3dmpmqmhiiliwubh4ghdt2qr27va67sszkua7trziinwov.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
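# Together, the two kernels above form a numerically stable softmax, split
# into max-subtracted exponentiation and row-sum division. Hedged sketch:
#   e = torch.exp(scores - scores.max(-1, keepdim=True).values)
#   attn = e / e.sum(-1, keepdim=True)   # == scores.softmax(-1)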
# kernel path: runs/run_shard_7/inductor_cache/q7/cq73c5ooraibj5uk4ux6lvn5xmsnxx46j7bthg3buq3ramf6alkv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/aj/cajaf3uwltkop5opnit33ytdbaf4kfffungbdvzdj6h4mgiacix5.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_1 => add_2
# x_2 => add_3, add_4, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %primals_1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [0, 1]), kwargs = {correction: 0, keepdim: True})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_9), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_3), kwargs = {})
triton_per_fused_add_native_layer_norm_5 = async_compile.triton('triton_per_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=(7,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_native_layer_norm_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_native_layer_norm_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp23 = tl.load(in_ptr2 + (r0), None)
tmp25 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = 16.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tmp21 = tmp2 - tmp10
tmp22 = tmp21 * tmp20
tmp24 = tmp22 * tmp23
tmp26 = tmp24 + tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None)
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp26, None)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
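# For reference, the kernel above fuses the residual add with LayerNorm over
# all 16 elements at once (normalized_shape=[seq_len, embed_channels]=[4, 4]):
#   h = attn_out + x;  y = (h - mean(h)) / sqrt(var(h) + 1e-05) * weight + bias
# which is why the reduction runs over rnumel=16 with a single x-program.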
# kernel path: runs/run_shard_7/inductor_cache/dl/cdltpn3oomwlklfq755lyas32ec23wprtt5vostfbpxqrhs6ob5s.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_4 => add_5, erf, mul_5, mul_6, mul_7
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_4, 0.5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_4, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_5), kwargs = {})
triton_poi_fused_gelu_6 = async_compile.triton('triton_poi_fused_gelu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
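# The kernel above is the exact (erf-based) GELU; 0.7071067811865476 is
# 1/sqrt(2), so it computes gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))).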
# kernel path: runs/run_shard_7/inductor_cache/aa/caaq4ucpkt3vjzksxe4guk7bxswlads73thjjwgkify4pzmk3n6g.py
# Topologically Sorted Source Nodes: [x_1, x_6], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add_2
# x_6 => add_6
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %primals_1), kwargs = {})
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_tensor, %add_2), kwargs = {})
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
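# The kernel above folds the final Linear bias into the second residual
# connection: out = (h @ W_out + bias) + (attn_out + x), matching
# x = self.mlp_to_emb(x); x = x + shortcut2 in the source module.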
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
buf1 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
buf3 = buf1; del buf1 # reuse
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_per_fused_native_layer_norm_0.run(buf3, primals_1, primals_2, primals_3, buf0, buf4, 1, 16, grid=grid(1), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), buf4, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), buf4, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf7)
buf8 = reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 16), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf8, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf10, buf11, 64, grid=grid(64), stream=stream0)
del buf10
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf11, reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 1), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf12, buf13, 4, 4, grid=grid(4, 4), stream=stream0)
buf14 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf13, (4, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14)
del primals_7
buf15 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
buf16 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
buf18 = buf16; del buf16 # reuse
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_per_fused_add_native_layer_norm_5.run(buf18, buf14, primals_1, primals_2, primals_3, buf15, buf19, 1, 16, grid=grid(1), stream=stream0)
del primals_3
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf19, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf20)
del primals_9
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu]
triton_poi_fused_gelu_6.run(buf20, buf21, 16, grid=grid(16), stream=stream0)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf21, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf22)
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [x_1, x_6], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf23, primals_11, buf14, primals_1, 16, grid=grid(16), stream=stream0)
del primals_11
return (buf23, primals_1, primals_2, buf0, buf3, buf4, buf11, reinterpret_tensor(buf13, (4, 4), (4, 1), 0), buf14, buf15, buf18, buf19, buf20, buf21, primals_10, primals_8, primals_6, reinterpret_tensor(buf7, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf8, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class TransformerBlock(nn.Module):
def __init__(self, seq_len: 'int', embed_channels: 'int', mlp_dims:
'int', num_heads: 'int'):
super().__init__()
self.embed_channels = embed_channels
self.seq_len = seq_len
self.mlp_dims = mlp_dims
self.num_heads = num_heads
self.layer_norm = nn.LayerNorm([self.seq_len, self.embed_channels])
self.self_attn = nn.MultiheadAttention(self.embed_channels, self.
num_heads)
self.emb_to_mlp = nn.Linear(self.embed_channels, self.mlp_dims)
self.mlp_to_emb = nn.Linear(self.mlp_dims, self.embed_channels)
def forward(self, x: 'torch.Tensor'):
shortcut = x
x = self.layer_norm(x)
x, _ = self.self_attn(x, x, x)
x = x + shortcut
shortcut2 = x
x = self.layer_norm(x)
x = self.emb_to_mlp(x)
x = F.gelu(x)
x = self.mlp_to_emb(x)
x = x + shortcut2
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'seq_len': 4, 'embed_channels': 4, 'mlp_dims': 4,
'num_heads': 4}]
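# Usage sketch (an illustration, not part of the traced graph):
#   block = TransformerBlock(seq_len=4, embed_channels=4, mlp_dims=4, num_heads=4)
#   out = block(torch.rand(4, 4))  # -> torch.Size([4, 4])
# With embed_channels=4 and num_heads=4 each attention head operates on a
# single scalar channel, which is what lets Inductor specialize the kernels
# above so aggressively.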
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp21 = tl.load(in_ptr1 + r0, None)
tmp23 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 16.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 * tmp18
tmp22 = tmp20 * tmp21
tmp24 = tmp22 + tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp24, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
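# The multiply-by-1.0 above is the attention scaling 1/sqrt(head_dim): with
# embed_channels=4 and num_heads=4, head_dim = 1, so the scale degenerates to
# 1.0 and the kernel reduces to adding the query slice of in_proj_bias.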
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
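# Together the two kernels above implement a numerically stable softmax over
# the last dim of size 4: the first subtracts the row max and exponentiates,
# the second divides by the row sum of those exponentials.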
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_add_native_layer_norm_5(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr
):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp23 = tl.load(in_ptr2 + r0, None)
tmp25 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp7 = tl.sum(tmp5, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = 16.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tmp21 = tmp2 - tmp10
tmp22 = tmp21 * tmp20
tmp24 = tmp22 * tmp23
tmp26 = tmp24 + tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
@triton.jit
def triton_poi_fused_gelu_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
buf1 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
buf3 = buf1
del buf1
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_native_layer_norm_0[grid(1)](buf3, primals_1,
primals_2, primals_3, buf0, buf4, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf4, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf4, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf7)
buf8 = reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 16), 0)
del buf5
triton_poi_fused_mul_1[grid(16)](buf8, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf6, (4, 1, 4), (1, 1,
4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = buf9
del buf9
triton_poi_fused__softmax_3[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf11, reinterpret_tensor(buf7, (4, 4, 1), (1, 4,
1), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(4, 4)](buf12, buf13, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0)
del buf12
extern_kernels.addmm(primals_7, reinterpret_tensor(buf13, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_7
buf15 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
buf16 = empty_strided_cuda((1, 1), (1, 1), torch.float32)
buf18 = buf16
del buf16
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_per_fused_add_native_layer_norm_5[grid(1)](buf18, buf14,
primals_1, primals_2, primals_3, buf15, buf19, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_3
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf19, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf20)
del primals_9
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_gelu_6[grid(16)](buf20, buf21, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf21, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf22)
buf23 = buf22
del buf22
triton_poi_fused_add_7[grid(16)](buf23, primals_11, buf14,
primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_11
return (buf23, primals_1, primals_2, buf0, buf3, buf4, buf11,
reinterpret_tensor(buf13, (4, 4), (4, 1), 0), buf14, buf15, buf18,
buf19, buf20, buf21, primals_10, primals_8, primals_6,
reinterpret_tensor(buf7, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf8, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf6, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
class TransformerBlockNew(nn.Module):
def __init__(self, seq_len: 'int', embed_channels: 'int', mlp_dims:
'int', num_heads: 'int'):
super().__init__()
self.embed_channels = embed_channels
self.seq_len = seq_len
self.mlp_dims = mlp_dims
self.num_heads = num_heads
self.layer_norm = nn.LayerNorm([self.seq_len, self.embed_channels])
self.self_attn = nn.MultiheadAttention(self.embed_channels, self.
num_heads)
self.emb_to_mlp = nn.Linear(self.embed_channels, self.mlp_dims)
self.mlp_to_emb = nn.Linear(self.mlp_dims, self.embed_channels)
def forward(self, input_0):
primals_1 = self.layer_norm.weight
primals_2 = self.layer_norm.bias
primals_4 = self.self_attn.in_proj_weight
primals_5 = self.self_attn.in_proj_bias
primals_3 = self.self_attn.out_proj.weight
primals_7 = self.self_attn.out_proj.bias
primals_6 = self.emb_to_mlp.weight
primals_9 = self.emb_to_mlp.bias
primals_8 = self.mlp_to_emb.weight
primals_11 = self.mlp_to_emb.bias
primals_10 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| ketan0/ddim | TransformerBlock | false | 3,886 | [
"MIT"
] | 0 | 26f2de1107885a3f332dd8435b73a1eaedbe10a8 | https://github.com/ketan0/ddim/tree/26f2de1107885a3f332dd8435b73a1eaedbe10a8 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, seq_len: 'int', embed_channels: 'int', mlp_dims:
'int', num_heads: 'int'):
super().__init__()
self.embed_channels = embed_channels
self.seq_len = seq_len
self.mlp_dims = mlp_dims
self.num_heads = num_heads
self.layer_norm = nn.LayerNorm([self.seq_len, self.embed_channels])
self.self_attn = nn.MultiheadAttention(self.embed_channels, self.
num_heads)
self.emb_to_mlp = nn.Linear(self.embed_channels, self.mlp_dims)
self.mlp_to_emb = nn.Linear(self.mlp_dims, self.embed_channels)
def forward(self, x: 'torch.Tensor'):
shortcut = x
x = self.layer_norm(x)
x, _ = self.self_attn(x, x, x)
x = x + shortcut
shortcut2 = x
x = self.layer_norm(x)
x = self.emb_to_mlp(x)
x = F.gelu(x)
x = self.mlp_to_emb(x)
x = x + shortcut2
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'seq_len': 4, 'embed_channels': 4, 'mlp_dims': 4,
'num_heads': 4}]
|
EntMinLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wv/cwvti54lsojjpkh6f73xsvv55wjtno2rrmtjgaznlsjst37yn74a.py
# Topologically Sorted Source Nodes: [soft_f_x, log_soft_f_x], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# log_soft_f_x => amax_1, sub_1
# soft_f_x => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_1), kwargs = {})
triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr1 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wg/cwgc6ymt2nmvdkvhsaj4ueis35jzr3zo7xwpdzzrmm7eqvkklluo.py
# Topologically Sorted Source Nodes: [soft_f_x, log_soft_f_x, mul, sum_1, neg, ent], Original ATen: [aten._softmax, aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# ent => div_1
# log_soft_f_x => exp_1, log, sub_2, sum_2
# mul => mul
# neg => neg
# soft_f_x => div, sum_1
# sum_1 => sum_3
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_3,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 4), kwargs = {})
triton_red_fused__log_softmax__softmax_div_mul_neg_sum_1 = async_compile.triton('triton_red_fused__log_softmax__softmax_div_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax__softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__log_softmax__softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp25 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp10 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = _tmp25 + tmp24
_tmp25 = tl.where(rmask, tmp26, _tmp25)
tmp25 = tl.sum(_tmp25, 1)[:, None]
tmp27 = -tmp25
tmp28 = 0.25
tmp29 = tmp27 * tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp29, None)
''', device_str='cuda')
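# The trailing multiply by 0.25 above is the division by f_x.shape[0] (the
# batch size 4) from the source loss: ent = -sum(p * log_p) / 4.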
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [soft_f_x, log_soft_f_x], Original ATen: [aten._softmax, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0.run(arg0_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [soft_f_x, log_soft_f_x, mul, sum_1, neg, ent], Original ATen: [aten._softmax, aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
triton_red_fused__log_softmax__softmax_div_mul_neg_sum_1.run(buf4, buf0, buf1, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class EntMinLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, f_x):
soft_f_x = F.softmax(f_x, dim=-1)
log_soft_f_x = F.log_softmax(f_x, dim=-1)
ent = -torch.sum(soft_f_x * log_soft_f_x) / f_x.shape[0]
return ent
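# For reference (matching the forward above): with p = softmax(f_x, dim=-1),
#   ent = -(1 / f_x.shape[0]) * sum(p * log(p)),
# i.e. the Shannon entropy of the predicted distribution, summed over every
# position and averaged over the batch dimension only.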
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
@triton.jit
def triton_red_fused__log_softmax__softmax_div_mul_neg_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp25 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp10 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = _tmp25 + tmp24
_tmp25 = tl.where(rmask, tmp26, _tmp25)
tmp25 = tl.sum(_tmp25, 1)[:, None]
tmp27 = -tmp25
tmp28 = 0.25
tmp29 = tmp27 * tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg0_1, buf0,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_red_fused__log_softmax__softmax_div_mul_neg_sum_1[grid(1)](buf4,
buf0, buf1, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1
)
del buf0
del buf1
return buf4,
class EntMinLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| leoandeol/ldir | EntMinLoss | false | 3,887 | [
"MIT"
] | 0 | f90408c5fb16a52c6c5a76fff1c46b9062343ad5 | https://github.com/leoandeol/ldir/tree/f90408c5fb16a52c6c5a76fff1c46b9062343ad5 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, f_x):
soft_f_x = F.softmax(f_x, dim=-1)
log_soft_f_x = F.log_softmax(f_x, dim=-1)
ent = -torch.sum(soft_f_x * log_soft_f_x) / f_x.shape[0]
return ent
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CeCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# loss => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4s/c4sinl6spiqxhpvyucixluedc7ia6rt4kmscnsrfq6ef6uymhfpa.py
# Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# loss => div, exp, log, mul, neg, sub_1, sum_1, sum_2
# loss_1 => mul_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1.0), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = 1.0
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
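# The 0.015625 factor above is 1/64: cross_entropy with 4D soft targets
# averages over the N*H*W = 4*4*4 = 64 positions of the (4, 4, 4, 4) input.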
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf2, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class CeCriterion(Criterion):
def __init__(self, alpha=1.0, name='Cross Entropy Criterion'):
"""
        Cross entropy loss
:param alpha:
:type alpha:
:param name:
:type name:
"""
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
        if weight is not None:
            loss = torch.mean(F.cross_entropy(input, target, reduction='none',
                ignore_index=ignore_index) * weight)
else:
loss = F.cross_entropy(input, target, ignore_index=ignore_index)
loss = loss * self.alpha
return loss
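# For reference, with 4D soft targets as produced by get_inputs() below, the
# unweighted branch computes
#   loss = -mean_over_{n,h,w}( sum_c target[n,c,h,w] * log_softmax(input)[n,c,h,w] )
# scaled by alpha (ignore_index is only meaningful for class-index targets).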
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = 1.0
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class CeCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='Cross Entropy Criterion'):
"""
        Cross-entropy loss.
:param alpha:
:type alpha:
:param name:
:type name:
"""
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| johnson7788/mt-dnn | CeCriterion | false | 3,888 | [
"MIT"
] | 0 | 26e5c4a5bfdbf1a1dd1c903e606db1c070568237 | https://github.com/johnson7788/mt-dnn/tree/26e5c4a5bfdbf1a1dd1c903e606db1c070568237 | import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class Model(Criterion):
def __init__(self, alpha=1.0, name='Cross Entropy Criterion'):
"""
        Cross-entropy loss.
:param alpha:
:type alpha:
:param name:
:type name:
"""
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
        if weight is not None:
            loss = torch.mean(F.cross_entropy(input, target, reduction=
                'none', ignore_index=ignore_index) * weight)
else:
loss = F.cross_entropy(input, target, ignore_index=ignore_index)
loss = loss * self.alpha
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
gconv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dn/cdn3ssz2ymyotadkjimza47hdny463qd2fz4t5x62xxbt3dr3gd7.py
# Topologically Sorted Source Nodes: [y, y_1, y_2], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# y => convolution
# y_1 => add
# y_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
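# Sketch of the fusion above: in_out_ptr0 already holds conv(x); the kernel
# adds the per-channel bias (in_ptr0) and the residual input (in_ptr1),
# applies ReLU in place, and stores the (out <= 0) mask consumed by
# autograd's threshold_backward.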
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [y, y_1, y_2], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_convolution_relu_threshold_backward_0.run(buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class gconv(nn.Module):
def __init__(self, channel):
super(gconv, self).__init__()
self.relu = nn.ReLU()
self.conv = nn.Conv2d(channel, channel, kernel_size=3, padding=1)
def forward(self, x):
y = self.conv(x)
y = y + x
y = self.relu(y)
return y
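# Usage sketch (channel must match the input's channel dimension):
#   block = gconv(channel=4)
#   y = block(torch.rand(4, 4, 4, 4))  # same shape as the input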
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_convolution_relu_threshold_backward_0[grid(256)](
buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class gconvNew(nn.Module):
def __init__(self, channel):
super(gconvNew, self).__init__()
self.relu = nn.ReLU()
self.conv = nn.Conv2d(channel, channel, kernel_size=3, padding=1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lee-zq/MRDN | gconv | false | 3,889 | [
"Apache-2.0"
] | 0 | 976c1f8cd0d4b1943378149ef836bb86dd5fc0cd | https://github.com/lee-zq/MRDN/tree/976c1f8cd0d4b1943378149ef836bb86dd5fc0cd | import torch
import torch.nn as nn
import torch.utils.model_zoo
class Model(nn.Module):
def __init__(self, channel):
super().__init__()
self.relu = nn.ReLU()
self.conv = nn.Conv2d(channel, channel, kernel_size=3, padding=1)
def forward(self, x):
y = self.conv(x)
y = y + x
y = self.relu(y)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
adaLIN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/um/cumfxxnuff67646tii7qigo5xu4wp3dwgamqagptzcoe6havnpgp.py
# Topologically Sorted Source Nodes: [ln_mean, ln_var, add_1, sqrt_1], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt]
# Source node to ATen node mapping:
# add_1 => add_1
# ln_mean => mean_1
# ln_var => var_1
# sqrt_1 => sqrt_1
# Graph fragment:
# %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1, 2, 3], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 1, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var_1, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
triton_per_fused_add_mean_sqrt_var_0 = async_compile.triton('triton_per_fused_add_mean_sqrt_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_sqrt_var_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3n/c3ndqo7nij3t5k6ubr2vtyqzix6kntc5ziep6mlsj6ad5b7y77jj.py
# Topologically Sorted Source Nodes: [in_mean, in_var, sub, add, sqrt, out_in, sub_1, out_ln, mul, sub_2, mul_1, out], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# add => add
# in_mean => mean
# in_var => var
# mul => mul
# mul_1 => mul_1
# out => add_2
# out_in => div
# out_ln => div_1
# sqrt => sqrt
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2, 3], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [2, 3]), kwargs = {correction: 1, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %expand), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %div_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp30 = 1.0
tmp31 = tmp30 - tmp26
tmp33 = tmp0 - tmp32
tmp35 = tmp33 / tmp34
tmp36 = tmp31 * tmp35
tmp37 = tmp29 + tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp37, xmask)
''', device_str='cuda')
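# Sketch: each program handles one (n, c) slice of 16 spatial elements,
# computing the instance-norm mean/std inline and blending with the
# precomputed layer-norm statistics as rho * IN(x) + (1 - rho) * LN(x),
# with rho indexed per channel (x2) and the LN stats per sample (x3).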
# kernel path: runs/run_shard_7/inductor_cache/bh/cbhkkkfowmcaigx6sts2oovquwlb7qm6qdlrdzh26rfoghoreml6.py
# Topologically Sorted Source Nodes: [sub, out_in, sub_1, out_ln, mul, sub_2, mul_1, out, mul_2, out_1], Original ATen: [aten.sub, aten.div, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# out => add_2
# out_1 => add_3
# out_in => div
# out_ln => div_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %expand), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %div_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %unsqueeze_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %unsqueeze_3), kwargs = {})
triton_poi_fused_add_div_mul_rsub_sub_2 = async_compile.triton('triton_poi_fused_add_div_mul_rsub_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_rsub_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 256
x0 = xindex % 16
x2 = (xindex // 256)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (x4), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf6 # reuse
buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [ln_mean, ln_var, add_1, sqrt_1], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0.run(buf7, buf11, primals_1, 4, 64, grid=grid(4), stream=stream0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf3 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [in_mean, in_var, sub, add, sqrt, out_in, sub_1, out_ln, mul, sub_2, mul_1, out], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul, aten.rsub]
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1.run(buf1, buf5, primals_1, primals_2, buf7, buf11, buf12, 16, 16, grid=grid(16), stream=stream0)
del primals_2
buf13 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, out_in, sub_1, out_ln, mul, sub_2, mul_1, out, mul_2, out_1], Original ATen: [aten.sub, aten.div, aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_div_mul_rsub_sub_2.run(buf12, primals_3, primals_4, buf13, 4096, grid=grid(4096), stream=stream0)
del buf12
del primals_4
return (buf13, primals_1, primals_3, buf1, buf5, buf7, buf11, )
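# Note (hedged): get_inputs() feeds 4-D gamma/beta, so gamma.unsqueeze(2)
# .unsqueeze(3) becomes 6-D and broadcasting makes buf13 a (4, 4, 4, 4, 4, 4)
# tensor of 4096 elements, rather than the (N, C, H, W) output adaLIN would
# produce with the intended 2-D (N, C) affine parameters.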
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class adaLIN(nn.Module):
def __init__(self, num_features, eps=1e-05):
super(adaLIN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input, gamma, beta):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True
), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True
), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 -
self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2
).unsqueeze(3)
return out
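# In effect (sketch): out = (rho * IN(x) + (1 - rho) * LN(x)) * gamma + beta,
# where IN normalizes each (n, c) slice over H, W, LN normalizes each sample
# over C, H, W, and rho is a learned per-channel blend weight initialized to 0.9.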
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp30 = 1.0
tmp31 = tmp30 - tmp26
tmp33 = tmp0 - tmp32
tmp35 = tmp33 / tmp34
tmp36 = tmp31 * tmp35
tmp37 = tmp29 + tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp37, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 256
x0 = xindex % 16
x2 = xindex // 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x4, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf6
buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0[grid(4)](buf7, buf11,
primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf3
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1[grid(16)](buf1,
buf5, primals_1, primals_2, buf7, buf11, buf12, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
del primals_2
buf13 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16,
4, 1), torch.float32)
triton_poi_fused_add_div_mul_rsub_sub_2[grid(4096)](buf12,
primals_3, primals_4, buf13, 4096, XBLOCK=256, num_warps=4,
num_stages=1)
del buf12
del primals_4
return buf13, primals_1, primals_3, buf1, buf5, buf7, buf11
class adaLINNew(nn.Module):
def __init__(self, num_features, eps=1e-05):
super(adaLINNew, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input_0, input_1, input_2):
primals_2 = self.rho
primals_1 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| ldzhangyu/photo2cartoon | adaLIN | false | 3,890 | [
"MIT"
] | 0 | d5b371e77e61018c28109db67e8306e5e6064800 | https://github.com/ldzhangyu/photo2cartoon/tree/d5b371e77e61018c28109db67e8306e5e6064800 | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, num_features, eps=1e-05):
super().__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input, gamma, beta):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True
), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True
), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 -
self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2
).unsqueeze(3)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
LinearModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3s/c3swieudbq6hlerbvzxffejbssgds5rhgg5xk7zaijlgerjj4eqy.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
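# Sketch: this kernel fuses the nn.Linear bias add with ReLU; the matmul
# itself is dispatched to extern_kernels.mm in call() below, and the kernel
# is reused for both 50-unit hidden layers (xnumel = 4 * 50).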
# kernel path: runs/run_shard_7/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_3 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_3 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
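# Sketch: the two kernels above implement numerically stable softmax in two
# passes; softmax_1 subtracts the row max before exponentiating, softmax_2
# divides by the row sum, with each 4-logit row re-read via 'evict_last' loads.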
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (50, 4), (4, 1))
assert_size_stride(primals_3, (50, ), (1, ))
assert_size_stride(primals_4, (50, 50), (50, 1))
assert_size_stride(primals_5, (50, ), (1, ))
assert_size_stride(primals_6, (4, 50), (50, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 50), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 200, grid=grid(200), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (50, 50), (1, 50), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 200, grid=grid(200), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (50, 4), (1, 50), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
del buf5
return (buf6, primals_1, buf1, buf3, buf6, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((50, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((50, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class LinearModel(nn.Module):
"""Model creation.
"""
def __init__(self, input_dim, output_dim):
super(LinearModel, self).__init__()
self.layer1 = nn.Linear(input_dim, 50)
self.layer2 = nn.Linear(50, 50)
self.layer3 = nn.Linear(50, output_dim)
def forward(self, x):
"""Forward pass."""
x = torch.flatten(x, start_dim=1)
x = F.relu(self.layer1(x))
x = F.relu(self.layer2(x))
x = F.softmax(self.layer3(x), dim=1)
return x
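# Usage sketch: each output row is a probability vector over output_dim classes:
#   model = LinearModel(input_dim=4, output_dim=4)
#   probs = model(torch.rand(4, 4))  # rows sum to 1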
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (50, 4), (4, 1))
assert_size_stride(primals_3, (50,), (1,))
assert_size_stride(primals_4, (50, 50), (50, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (4, 50), (50, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 50),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(200)](buf1, primals_3, 200, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (50, 50), (1,
50), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(200)](buf3, primals_5, 200, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(50, 4), (1, 50), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf5
return buf6, primals_1, buf1, buf3, buf6, primals_6, primals_4
class LinearModelNew(nn.Module):
"""Model creation.
"""
def __init__(self, input_dim, output_dim):
super(LinearModelNew, self).__init__()
self.layer1 = nn.Linear(input_dim, 50)
self.layer2 = nn.Linear(50, 50)
self.layer3 = nn.Linear(50, output_dim)
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_6 = self.layer3.weight
primals_7 = self.layer3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
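# Usage sketch (hypothetical, for illustration only): the compiled wrapper is a
# drop-in replacement for the original module on CUDA inputs of shape (4, 4).
def _example_usage():
    model = LinearModelNew(input_dim=4, output_dim=4).cuda()
    probs = model(torch.rand(4, 4, device='cuda'))
    return probs  # each row sums to 1 after the softmax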
| learniotai/iotai-sensor-classifications | LinearModel | false | 3,891 | [
"Apache-2.0"
] | 0 | ba2527cb317afa30a5c495d1cddc16f7dc2936ed | https://github.com/learniotai/iotai-sensor-classifications/tree/ba2527cb317afa30a5c495d1cddc16f7dc2936ed | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
"""Model creation.
"""
def __init__(self, input_dim, output_dim):
super().__init__()
self.layer1 = nn.Linear(input_dim, 50)
self.layer2 = nn.Linear(50, 50)
self.layer3 = nn.Linear(50, output_dim)
def forward(self, x):
"""Forward pass."""
x = torch.flatten(x, start_dim=1)
x = F.relu(self.layer1(x))
x = F.relu(self.layer2(x))
x = F.softmax(self.layer3(x), dim=1)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
_MCLSTMCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7x/c7xa2fzoeg7fkdhz3rijf3negddtnquocgqamwnzmzil4bl2mrsa.py
# Topologically Sorted Source Nodes: [ct, norm, add, truediv], Original ATen: [aten.new_zeros, aten.linalg_vector_norm, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# ct => full
# norm => full_default, pow_2, sum_1
# truediv => div
# Graph fragment:
# %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%full_default, None), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%full, %add), kwargs = {})
triton_per_fused_add_div_linalg_vector_norm_new_zeros_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_new_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(2,), equal_to_1=(1,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_new_zeros_0(out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 4
r2 = (rindex // 4)
tmp0 = 0.0
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = tmp0 / tmp5
tl.store(out_ptr1 + (tl.broadcast_to(r1 + (12*r2), [XBLOCK, RBLOCK])), tmp6, None)
''', device_str='cuda')
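# Illustrative note (not generated code): because the initial cell state is all
# zeros, this kernel reduces to writing zeros into the last third of the
# 12-column feature buffer; the norm term only matters on later steps.
# Eager-mode sketch, assuming batch_size = hidden_size = 4:
def _initial_features_tail_reference(batch_size=4, hidden_size=4):
    ct = torch.zeros(batch_size, hidden_size, device='cuda')
    return ct / (ct.norm(1) + 1e-05)  # still all zeros, matching tmp6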
# kernel path: runs/run_shard_7/inductor_cache/ik/ciktnpigrrv57ihedpahnqmqkyqnrxb2ve44dt2xuj4zd3xovwjp.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_for_fused_1 = async_compile.triton('triton_for_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.foreach(
num_warps=8,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'kernel_name': 'triton_for_fused_1', 'mutated_arg_names': [], 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
)
@triton.jit
def triton_for_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1):
pid = tl.program_id(0)
XBLOCK: tl.constexpr = 1024
num_xblocks_0 = tl.cdiv(16, XBLOCK)
num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK)
if pid < num_xblocks_0:
pid_offset = pid
xnumel = 16
rnumel = 1
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (12*x1)), tmp0, xmask)
elif pid < num_xblocks_1:
pid_offset = pid - num_xblocks_0
xnumel = 16
rnumel = 1
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x3 = xindex % 4
x4 = (xindex // 4)
tmp1 = tl.load(in_ptr1 + (x5), xmask)
tl.store(out_ptr1 + (x3 + (12*x4)), tmp1, xmask)
else:
pass
''', device_str='cuda')
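# Illustrative note (an interpretation of the graph): this foreach kernel fuses
# two strided copies that lay x_m[0] and x_a[0] into the first two 4-column
# slices of the 12-column feature buffer, i.e. the first two operands of the
# concat in the source model's _step. Eager-mode sketch:
def _features_first_step_reference(xt_m, xt_a, ct):
    return torch.cat([xt_m, xt_a, ct / (ct.norm(1) + 1e-05)], dim=-1)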
# kernel path: runs/run_shard_7/inductor_cache/ro/croozag2ipv2izkskbnusmwfrgzvgxuxdtgvh6cmwihuv2ft2g6q.py
# Topologically Sorted Source Nodes: [sigmoid, i], Original ATen: [aten.sigmoid, aten.div]
# Source node to ATen node mapping:
# i => div_1
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sigmoid, %expand), kwargs = {})
triton_poi_fused_div_sigmoid_2 = async_compile.triton('triton_poi_fused_div_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp4 = tl_math.abs(tmp3)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp4 + tmp7
tmp10 = tl.sigmoid(tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = tmp8 + tmp11
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl_math.abs(tmp14)
tmp16 = tmp12 + tmp15
tmp17 = 1e-12
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = tmp1 / tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
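# Illustrative reference (not generated code): this kernel is the fused form of
# the normalized sigmoid input gate, F.normalize(sigmoid(h), p=1, dim=-1);
# F.normalize's default eps=1e-12 matches the clamp at tmp17/tmp18 above.
def _normalized_sigmoid_reference(h):
    import torch.nn.functional as F
    return F.normalize(torch.sigmoid(h), p=1, dim=-1)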
# kernel path: runs/run_shard_7/inductor_cache/qo/cqohmoojuze4ghdeumxkas5argfq7scxxrrrrmi4ja5aujxqylbm.py
# Topologically Sorted Source Nodes: [ct], Original ATen: [aten.new_zeros]
# Source node to ATen node mapping:
# ct => full
# Graph fragment:
# %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_new_zeros_3 = async_compile.triton('triton_poi_fused_new_zeros_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_new_zeros_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sb/csb52ia7nfbux6ajj3tis46aqykq6p4giv3lsll3x4sellt3qcel.py
# Topologically Sorted Source Nodes: [relu, r], Original ATen: [aten.relu, aten.div]
# Source node to ATen node mapping:
# r => div_2
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %expand_1), kwargs = {})
triton_poi_fused_div_relu_4 = async_compile.triton('triton_poi_fused_div_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_relu_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tl_math.abs(tmp4)
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tl_math.abs(tmp7)
tmp9 = tmp5 + tmp8
tmp11 = triton_helpers.maximum(tmp1, tmp10)
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp9 + tmp12
tmp15 = triton_helpers.maximum(tmp1, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = tmp13 + tmp16
tmp18 = 1e-12
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp2 / tmp19
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
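# The ReLU variant of the same pattern: the redistribution gate is
# F.normalize(relu(h), p=1, dim=-1), again clamped at 1e-12. Sketch:
def _normalized_relu_reference(h):
    import torch.nn.functional as F
    return F.normalize(torch.relu(h), p=1, dim=-1)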
# kernel path: runs/run_shard_7/inductor_cache/7c/c7clas5lw2nusjly52sgckjeygnor3o6xedozxfydrxogcc5ah6f.py
# Topologically Sorted Source Nodes: [o, m_new, sub, ct_1, norm_1, add_2], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# add_2 => add_2
# ct_1 => mul_1
# m_new => add_1
# norm_1 => abs_4, pow_8, sum_4
# o => sigmoid_1
# sub => sub
# Graph fragment:
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_2,), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %squeeze_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {})
# %mul_1 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %add_1), kwargs = {})
# %abs_4 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_1,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_4, None), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 1.0), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_8, 1e-05), kwargs = {})
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_out_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp2
tmp8 = tl_math.abs(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tl.store(in_out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp2, None)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
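# Illustrative reference (an interpretation of the graph): this persistent
# reduction fuses the per-step cell update with the scalar norm needed by the
# next step's feature concat. In eager terms, for output-gate logits o_logits
# and the two mass contributions m_in and m_sys:
def _step_update_reference(m_in, m_sys, o_logits):
    m_new = m_in + m_sys                        # stored back into in_out_ptr0
    ct = (1 - torch.sigmoid(o_logits)) * m_new  # stored into out_ptr0
    norm = ct.abs().sum() + 1e-05               # scalar stored into in_out_ptr1
    return m_new, ct, norm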
# kernel path: runs/run_shard_7/inductor_cache/ui/cuiig7hwoftgd73y5g4nxeusjygas4d43jgp6jstawnsxmuvtesk.py
# Topologically Sorted Source Nodes: [features_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# features_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%select_1, %select_5, %div_3], -1), kwargs = {})
triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp15 = tl.load(in_ptr3 + (0))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 + (4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (16 + (4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp14 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp10, tmp19)
tmp21 = tl.where(tmp4, tmp5, tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
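# Illustrative note: cat_6, cat_8 and cat_9 below are the same concat at
# successive time steps, differing only in the element offset (16, 32, 48)
# into x_m and x_a. Eager-mode sketch for step t:
def _features_step_reference(x_m, x_a, ct, norm, t):
    return torch.cat([x_m[t], x_a[t], ct / norm], dim=-1)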
# kernel path: runs/run_shard_7/inductor_cache/u6/cu64qvgmjs5gpnv6x7okvus6vtjdt7t7hrq67kwq7qihx4y6hhlw.py
# Topologically Sorted Source Nodes: [o_1, m_new_1, sub_1, ct_2, norm_2, add_4], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# add_4 => add_4
# ct_2 => mul_3
# m_new_1 => add_3
# norm_2 => abs_7, pow_14, sum_7
# o_1 => sigmoid_3
# sub_1 => sub_1
# Graph fragment:
# %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_5,), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_2, %squeeze_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_3), kwargs = {})
# %mul_3 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %add_3), kwargs = {})
# %abs_7 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_3,), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_7, None), kwargs = {})
# %pow_14 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_7, 1.0), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_14, 1e-05), kwargs = {})
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_out_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp2
tmp8 = tl_math.abs(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tl.store(in_out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp2, None)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tl/ctlao5dome6dvekazjdh4pzndckhm5j242u3aretn7zguljoglem.py
# Topologically Sorted Source Nodes: [features_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# features_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%select_2, %select_6, %div_6], -1), kwargs = {})
triton_poi_fused_cat_8 = async_compile.triton('triton_poi_fused_cat_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp15 = tl.load(in_ptr3 + (0))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (32 + (4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (32 + (4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp14 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp10, tmp19)
tmp21 = tl.where(tmp4, tmp5, tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5e/c5egga74hinxugpzzgffdhddgnaewjqkcaq6bjtgbdqsoepsqhjp.py
# Topologically Sorted Source Nodes: [features_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# features_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%select_3, %select_7, %div_9], -1), kwargs = {})
triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp15 = tl.load(in_ptr3 + (0))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (48 + (4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (48 + (4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp14 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp10, tmp19)
tmp21 = tl.where(tmp4, tmp5, tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/56/c56dlfmd6ph7xm4t43oba7ddqf5xjsaulzwau7eevrwffyi54oh6.py
# Topologically Sorted Source Nodes: [m_new_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# m_new_3 => add_7
# Graph fragment:
# %add_7 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_6, %squeeze_7), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/en/cengjp6catznd72cpbumzf3yqxvgant3mspxbpqyimls3kme6rgd.py
# Topologically Sorted Source Nodes: [m_out, c], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# c => cat_5
# m_out => cat_4
# Graph fragment:
# %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_2, %mul_4, %mul_6],), kwargs = {})
# %cat_5 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_1, %mul_3, %mul_5, %mul_7],), kwargs = {})
triton_poi_fused_stack_11 = async_compile.triton('triton_poi_fused_stack_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4*((-4) + x1))), tmp14 & xmask, other=0.0)
tmp16 = tl.sigmoid(tmp15)
tmp17 = tl.load(in_ptr3 + (x0 + (4*((-4) + x1))), tmp14 & xmask, other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp14, tmp18, tmp19)
tmp21 = tmp0 >= tmp12
tmp22 = tl.full([1], 12, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr4 + (x0 + (4*((-8) + x1))), tmp24 & xmask, other=0.0)
tmp26 = tl.sigmoid(tmp25)
tmp27 = tl.load(in_ptr5 + (x0 + (4*((-8) + x1))), tmp24 & xmask, other=0.0)
tmp28 = tmp26 * tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp0 >= tmp22
tmp32 = tl.full([1], 16, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tl.load(in_ptr6 + (x0 + (4*((-12) + x1))), tmp31 & xmask, other=0.0)
tmp35 = tl.sigmoid(tmp34)
tmp36 = tl.load(in_ptr7 + (x0 + (4*((-12) + x1))), tmp31 & xmask, other=0.0)
tmp37 = tmp35 * tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp31, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp4, tmp10, tmp41)
tmp43 = tl.load(in_ptr8 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
tmp44 = tl.load(in_ptr9 + (x0 + (4*((-4) + x1))), tmp14 & xmask, other=0.0)
tmp45 = tl.load(in_ptr10 + (x0 + (4*((-8) + x1))), tmp24 & xmask, other=0.0)
tmp46 = 1.0
tmp47 = tmp46 - tmp35
tmp48 = tmp47 * tmp36
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp31, tmp48, tmp49)
tmp51 = tl.where(tmp24, tmp45, tmp50)
tmp52 = tl.where(tmp14, tmp44, tmp51)
tmp53 = tl.where(tmp4, tmp43, tmp52)
tl.store(out_ptr0 + (x2), tmp42, xmask)
tl.store(out_ptr1 + (x2), tmp53, xmask)
''', device_str='cuda')
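# Illustrative reference (not generated code): the final fused kernel builds
# both stacked outputs of the unrolled loop, m_out[t] = o_t * m_new_t and
# c[t] = (1 - o_t) * m_new_t. Eager-mode sketch over collected per-step values:
def _stack_outputs_reference(o_logits_list, m_new_list):
    o = [torch.sigmoid(x) for x in o_logits_list]
    m_out = torch.stack([o_t * m for o_t, m in zip(o, m_new_list)])
    c = torch.stack([(1 - o_t) * m for o_t, m in zip(o, m_new_list)])
    return m_out, c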
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 12), (12, 1))
assert_size_stride(primals_4, (16, ), (1, ))
assert_size_stride(primals_5, (16, 12), (12, 1))
assert_size_stride(primals_6, (16, ), (1, ))
assert_size_stride(primals_7, (4, 12), (12, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8) # alias
# Topologically Sorted Source Nodes: [ct, norm, add, truediv], Original ATen: [aten.new_zeros, aten.linalg_vector_norm, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_new_zeros_0.run(buf3, 1, 16, grid=grid(1), stream=stream0)
buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0) # alias
buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4) # alias
# Unsorted Source Nodes: [], Original ATen: []
triton_for_fused_1.run(primals_1, primals_2, buf1, buf2, grid=(2, 1, 1), stream=stream0)
buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf5)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf4, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, i], Original ATen: [aten.sigmoid, aten.div]
triton_poi_fused_div_sigmoid_2.run(buf5, buf8, 64, grid=grid(64), stream=stream0)
buf9 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, i, matmul], Original ATen: [aten.sigmoid, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf8, out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ct], Original ATen: [aten.new_zeros]
triton_poi_fused_new_zeros_3.run(buf10, 16, grid=grid(16), stream=stream0)
buf11 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [relu, r], Original ATen: [aten.relu, aten.div]
triton_poi_fused_div_relu_4.run(buf6, buf11, 64, grid=grid(64), stream=stream0)
buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu, r, matmul_1], Original ATen: [aten.relu, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 1, 4), (4, 0, 1), 0), buf11, out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0); del buf12 # reuse
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf15 = empty_strided_cuda((), (), torch.float32)
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [o, m_new, sub, ct_1, norm_1, add_2], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm]
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5.run(buf13, buf16, buf9, buf7, buf14, 1, 16, grid=grid(1), stream=stream0)
buf17 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [features_1], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(primals_1, primals_2, buf14, buf16, buf17, 48, grid=grid(48), stream=stream0)
buf18 = reinterpret_tensor(buf11, (4, 16), (16, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf17, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf18)
buf19 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf17, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf19)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu_1, r_1], Original ATen: [aten.relu, aten.div]
triton_poi_fused_div_relu_4.run(buf19, buf20, 64, grid=grid(64), stream=stream0)
buf21 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf17, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf21)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_2, i_1], Original ATen: [aten.sigmoid, aten.div]
triton_poi_fused_div_sigmoid_2.run(buf18, buf22, 64, grid=grid(64), stream=stream0)
buf23 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_2, i_1, matmul_2], Original ATen: [aten.sigmoid, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 16), buf22, out=buf23)
buf24 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf14, (4, 1, 4), (4, 4, 1), 0), buf20, out=buf24)
buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0); del buf23 # reuse
buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((), (), torch.float32)
buf28 = buf27; del buf27 # reuse
# Topologically Sorted Source Nodes: [o_1, m_new_1, sub_1, ct_2, norm_2, add_4], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm]
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7.run(buf25, buf28, buf24, buf21, buf26, 1, 16, grid=grid(1), stream=stream0)
buf29 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [features_2], Original ATen: [aten.cat]
triton_poi_fused_cat_8.run(primals_1, primals_2, buf26, buf28, buf29, 48, grid=grid(48), stream=stream0)
buf30 = reinterpret_tensor(buf22, (4, 16), (16, 1), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf29, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf30)
buf31 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_7], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf29, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf31)
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu_2, r_2], Original ATen: [aten.relu, aten.div]
triton_poi_fused_div_relu_4.run(buf31, buf32, 64, grid=grid(64), stream=stream0)
buf33 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [linear_8], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf29, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf33)
buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_4, i_2], Original ATen: [aten.sigmoid, aten.div]
triton_poi_fused_div_sigmoid_2.run(buf30, buf34, 64, grid=grid(64), stream=stream0)
buf35 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_4, i_2, matmul_4], Original ATen: [aten.sigmoid, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 32), buf34, out=buf35)
buf36 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf26, (4, 1, 4), (4, 4, 1), 0), buf32, out=buf36)
buf37 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0); del buf35 # reuse
buf38 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf39 = empty_strided_cuda((), (), torch.float32)
buf40 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [o_2, m_new_2, sub_2, ct_3, norm_3, add_6], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm]
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7.run(buf37, buf40, buf36, buf33, buf38, 1, 16, grid=grid(1), stream=stream0)
buf41 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [features_3], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(primals_1, primals_2, buf38, buf40, buf41, 48, grid=grid(48), stream=stream0)
del primals_2
buf42 = reinterpret_tensor(buf34, (4, 16), (16, 1), 0); del buf34 # reuse
# Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf41, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf42)
del primals_4
buf43 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_10], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf41, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf43)
del primals_6
buf44 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu_3, r_3], Original ATen: [aten.relu, aten.div]
triton_poi_fused_div_relu_4.run(buf43, buf44, 64, grid=grid(64), stream=stream0)
buf45 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0); del buf36 # reuse
# Topologically Sorted Source Nodes: [linear_11], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf41, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf45)
del primals_8
buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_6, i_3], Original ATen: [aten.sigmoid, aten.div]
triton_poi_fused_div_sigmoid_2.run(buf42, buf46, 64, grid=grid(64), stream=stream0)
buf47 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_6, i_3, matmul_6], Original ATen: [aten.sigmoid, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 48), buf46, out=buf47)
buf48 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf38, (4, 1, 4), (4, 4, 1), 0), buf44, out=buf48)
buf49 = reinterpret_tensor(buf47, (4, 4), (4, 1), 0); del buf47 # reuse
# Topologically Sorted Source Nodes: [m_new_3], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf49, buf48, 16, grid=grid(16), stream=stream0)
del buf48
buf50 = reinterpret_tensor(buf46, (16, 4), (4, 1), 0); del buf46 # reuse
buf51 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [m_out, c], Original ATen: [aten.stack]
triton_poi_fused_stack_11.run(buf7, buf13, buf21, buf25, buf33, buf37, buf45, buf49, buf14, buf26, buf38, buf50, buf51, 64, grid=grid(64), stream=stream0)
return (reinterpret_tensor(buf50, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0), buf4, buf5, buf6, buf7, buf13, buf14, buf16, buf17, buf18, buf19, buf20, buf21, buf25, buf26, buf28, buf29, buf30, buf31, buf32, buf33, buf37, buf38, buf40, buf41, buf42, buf43, buf44, buf45, buf49, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 48), primals_7, primals_5, primals_3, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 32), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 16), reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 0), )
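# Editorial note: the Python-level loop over the 4 time steps in the source
# forward() has been fully unrolled by the compiler, which is why the same
# addmm / bmm / fused-update sequence repeats four times above with growing
# buffer ids, and why the stack kernel receives all per-step tensors at once.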
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from typing import Tuple
import torch.nn as nn
class _Gate(nn.Module):
"""Utility class to implement a standard sigmoid gate"""
def __init__(self, in_features: 'int', out_features: 'int'):
super(_Gate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_features)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalised gate"""
return torch.sigmoid(self.fc(x))
class _NormalizedGate(nn.Module):
"""Utility class to implement a gate with normalised activation function"""
def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]',
normalizer: 'str'):
super(_NormalizedGate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_shape
[0] * out_shape[1])
self.out_shape = out_shape
if normalizer == 'normalized_sigmoid':
self.activation = nn.Sigmoid()
elif normalizer == 'normalized_relu':
self.activation = nn.ReLU()
else:
raise ValueError(
f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}"
)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalized gate"""
h = self.fc(x).view(-1, *self.out_shape)
return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1)
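# Usage sketch (hypothetical): with 'normalized_sigmoid', each row of the gate
# output is non-negative and sums to 1, so it can redistribute mass without
# creating any:
#     gate = _NormalizedGate(12, (4, 4), 'normalized_sigmoid')
#     w = gate(torch.rand(2, 12))  # shape (2, 4, 4); w.sum(-1) close to 1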
class _MCLSTMCell(nn.Module):
"""The logic of the MC-LSTM cell"""
def __init__(self, mass_input_size: 'int', aux_input_size: 'int',
hidden_size: 'int', cfg: 'Config'):
super(_MCLSTMCell, self).__init__()
self.cfg = cfg
self._hidden_size = hidden_size
gate_inputs = aux_input_size + hidden_size + mass_input_size
self.output_gate = _Gate(in_features=gate_inputs, out_features=
hidden_size)
self.input_gate = _NormalizedGate(in_features=gate_inputs,
out_shape=(mass_input_size, hidden_size), normalizer=
'normalized_sigmoid')
self.redistribution = _NormalizedGate(in_features=gate_inputs,
out_shape=(hidden_size, hidden_size), normalizer='normalized_relu')
self._reset_parameters()
def _reset_parameters(self):
if self.cfg.initial_forget_bias is not None:
nn.init.constant_(self.output_gate.fc.bias, val=self.cfg.
initial_forget_bias)
def forward(self, x_m: 'torch.Tensor', x_a: 'torch.Tensor') ->Tuple[
torch.Tensor, torch.Tensor]:
"""Perform forward pass on the MC-LSTM cell.
Parameters
----------
x_m : torch.Tensor
Mass input that will be conserved by the network.
x_a : torch.Tensor
Auxiliary inputs that will be used to modulate the gates but whose information won't be stored internally
in the MC-LSTM cells.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Outgoing mass and memory cells per time step of shape [sequence length, batch size, hidden size]
"""
_, batch_size, _ = x_m.size()
ct = x_m.new_zeros((batch_size, self._hidden_size))
m_out, c = [], []
for xt_m, xt_a in zip(x_m, x_a):
mt_out, ct = self._step(xt_m, xt_a, ct)
m_out.append(mt_out)
c.append(ct)
m_out, c = torch.stack(m_out), torch.stack(c)
return m_out, c
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1)
i = self.input_gate(features)
r = self.redistribution(features)
o = self.output_gate(features)
m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2)
m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
m_new = m_in + m_sys
return o * m_new, (1 - o) * m_new
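# (Editor's note.) Mass conservation follows from the algebra in _step: the
# emitted mass o * m_new and the retained state (1 - o) * m_new sum back to
# m_new = m_in + m_sys, and because both gates are L1-normalized over the
# hidden dimension, m_in and m_sys preserve the totals of xt_m and c. Mass
# is only redistributed, never created or destroyed.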
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'mass_input_size': 4, 'aux_input_size': 4, 'hidden_size':
4, 'cfg': _mock_config(initial_forget_bias=4)}]
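# (Editor's sketch.) Minimal end-to-end run of the cell above; the sizes
# mirror get_inputs()/get_init_inputs() and _mock_config stands in for the
# repository's Config object.
if __name__ == '__main__':
    cell = _MCLSTMCell(mass_input_size=4, aux_input_size=4, hidden_size=4,
        cfg=_mock_config(initial_forget_bias=4))
    x_m, x_a = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    m_out, c = cell(x_m, x_a)
    # Mass that entered is either emitted or still stored in the cells. The
    # totals match unless the ReLU zeroes an entire redistribution row, in
    # which case that row's mass is dropped by the normalization epsilon.
    print(m_out.shape, c.shape)
    print(x_m.sum().item(), (m_out.sum() + c[-1].sum()).item())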
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from typing import Tuple
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_new_zeros_0(out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 4
r2 = rindex // 4
tmp0 = 0.0
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = tmp0 / tmp5
tl.store(out_ptr1 + tl.broadcast_to(r1 + 12 * r2, [XBLOCK, RBLOCK]),
tmp6, None)
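# (Editor's note.) This kernel materializes c / (c.norm(1) + 1e-05) for the
# all-zero initial cell state: 0.0 / (0.0 + 1e-05) = 0.0 is written into
# columns 8..11 of the 12-wide, stride-12 concatenated feature buffer.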
@triton.jit
def triton_for_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1):
pid = tl.program_id(0)
XBLOCK: tl.constexpr = 1024
num_xblocks_0 = tl.cdiv(16, XBLOCK)
num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK)
if pid < num_xblocks_0:
pid_offset = pid
xnumel = 16
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)
elif pid < num_xblocks_1:
pid_offset = pid - num_xblocks_0
xnumel = 16
xoffset = pid_offset * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x5 = xindex
x3 = xindex % 4
x4 = xindex // 4
tmp1 = tl.load(in_ptr1 + x5, xmask)
tl.store(out_ptr1 + (x3 + 12 * x4), tmp1, xmask)
else:
pass
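# (Editor's note.) A foreach/combo kernel that fills the remaining slices of
# the same feature buffer for t=0: the first 16 elements of the mass input
# land in columns 0..3 and of the auxiliary input in columns 4..7, together
# completing torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1).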
@triton.jit
def triton_poi_fused_div_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.sigmoid(tmp2)
tmp4 = tl_math.abs(tmp3)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp4 + tmp7
tmp10 = tl.sigmoid(tmp9)
tmp11 = tl_math.abs(tmp10)
tmp12 = tmp8 + tmp11
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl_math.abs(tmp14)
tmp16 = tmp12 + tmp15
tmp17 = 1e-12
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = tmp1 / tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
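# (Editor's note.) Fused 'normalized_sigmoid' input gate: each element is
# sigmoid(x) divided by max(sum(|sigmoid(row)|), 1e-12) over its row of 4,
# i.e. F.normalize(torch.sigmoid(h), p=1, dim=-1) from _NormalizedGate.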
@triton.jit
def triton_poi_fused_new_zeros_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_relu_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tl_math.abs(tmp4)
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tl_math.abs(tmp7)
tmp9 = tmp5 + tmp8
tmp11 = triton_helpers.maximum(tmp1, tmp10)
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp9 + tmp12
tmp15 = triton_helpers.maximum(tmp1, tmp14)
tmp16 = tl_math.abs(tmp15)
tmp17 = tmp13 + tmp16
tmp18 = 1e-12
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp2 / tmp19
tl.store(out_ptr0 + x2, tmp20, xmask)
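# (Editor's note.) Fused 'normalized_relu' redistribution gate: relu(x)
# divided by the clamped L1 norm of its row -- the ReLU counterpart of
# triton_poi_fused_div_sigmoid_2 above.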
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_out_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp2
tmp8 = tl_math.abs(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
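# (Editor's note.) Per-step epilogue fused into one kernel: it accumulates
# m_new = m_in + m_sys in place (in_out_ptr0), forms the retained state
# (1 - sigmoid(o)) * m_new (out_ptr0), and reduces ||c||_1 + 1e-05 into a
# scalar (in_out_ptr1) that the following cat kernel uses to normalize the
# cell state inside the next feature vector.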
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp15 = tl.load(in_ptr3 + 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 + 4 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (16 + 4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp14 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp10, tmp19)
tmp21 = tl.where(tmp4, tmp5, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
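# (Editor's note.) Builds the t=1 features torch.cat([xt_m, xt_a,
# c / (c.norm(1) + 1e-05)], dim=-1): the offsets of 16 select time step 1 of
# the inputs, while in_ptr2/in_ptr3 carry the previous cell state and its
# precomputed norm. triton_poi_fused_cat_8 and _cat_9 below are identical
# except for offsets of 32 and 48 (time steps 2 and 3).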
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_out_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp6 * tmp2
tmp8 = tl_math.abs(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
@triton.jit
def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp15 = tl.load(in_ptr3 + 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (32 + 4 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (32 + 4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp14 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp10, tmp19)
tmp21 = tl.where(tmp4, tmp5, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp15 = tl.load(in_ptr3 + 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (48 + 4 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (48 + 4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp14 / tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp9, tmp10, tmp19)
tmp21 = tl.where(tmp4, tmp5, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_stack_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tl.sigmoid(tmp15)
tmp17 = tl.load(in_ptr3 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp14, tmp18, tmp19)
tmp21 = tmp0 >= tmp12
tmp22 = tl.full([1], 12, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr4 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0)
tmp26 = tl.sigmoid(tmp25)
tmp27 = tl.load(in_ptr5 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0)
tmp28 = tmp26 * tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp0 >= tmp22
tl.full([1], 16, tl.int64)
tmp34 = tl.load(in_ptr6 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0)
tmp35 = tl.sigmoid(tmp34)
tmp36 = tl.load(in_ptr7 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0)
tmp37 = tmp35 * tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp31, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp4, tmp10, tmp41)
tmp43 = tl.load(in_ptr8 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp44 = tl.load(in_ptr9 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0)
tmp45 = tl.load(in_ptr10 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0)
tmp46 = 1.0
tmp47 = tmp46 - tmp35
tmp48 = tmp47 * tmp36
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp31, tmp48, tmp49)
tmp51 = tl.where(tmp24, tmp45, tmp50)
tmp52 = tl.where(tmp14, tmp44, tmp51)
tmp53 = tl.where(tmp4, tmp43, tmp52)
tl.store(out_ptr0 + x2, tmp42, xmask)
tl.store(out_ptr1 + x2, tmp53, xmask)
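# (Editor's note.) Final stack kernel: it recomputes o = sigmoid(logits) for
# each of the four steps and writes o * m_new into out_ptr0 (the stacked
# m_out) and the per-step cell states into out_ptr1 (the stacked c),
# reproducing the two torch.stack calls of the eager forward.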
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 12), (12, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16, 12), (12, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (4, 12), (12, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8)
get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_new_zeros_0[grid(1)](buf3,
1, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0)
buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4)
triton_for_fused_1[2, 1, 1](primals_1, primals_2, buf1, buf2,
num_warps=8, num_stages=1)
buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf5)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, buf4, reinterpret_tensor(primals_5,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sigmoid_2[grid(64)](buf5, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 0), buf8, out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_new_zeros_3[grid(16)](buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf11 = buf8
del buf8
triton_poi_fused_div_relu_4[grid(64)](buf6, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 1, 4), (4, 0, 1),
0), buf11, out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0)
del buf12
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf15 = empty_strided_cuda((), (), torch.float32)
buf16 = buf15
del buf15
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5[grid(1)](
buf13, buf16, buf9, buf7, buf14, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf17 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
triton_poi_fused_cat_6[grid(48)](primals_1, primals_2, buf14, buf16,
buf17, 48, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf11, (4, 16), (16, 1), 0)
del buf11
extern_kernels.addmm(primals_4, buf17, reinterpret_tensor(primals_3,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf18)
buf19 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, buf17, reinterpret_tensor(primals_5,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf19)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_relu_4[grid(64)](buf19, buf20, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf21 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_8, buf17, reinterpret_tensor(primals_7,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf21)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sigmoid_2[grid(64)](buf18, buf22, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 16), buf22, out=buf23)
buf24 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf14, (4, 1, 4), (4, 4, 1),
0), buf20, out=buf24)
buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0)
del buf23
buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((), (), torch.float32)
buf28 = buf27
del buf27
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)](
buf25, buf28, buf24, buf21, buf26, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf29 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
triton_poi_fused_cat_8[grid(48)](primals_1, primals_2, buf26, buf28,
buf29, 48, XBLOCK=64, num_warps=1, num_stages=1)
buf30 = reinterpret_tensor(buf22, (4, 16), (16, 1), 0)
del buf22
extern_kernels.addmm(primals_4, buf29, reinterpret_tensor(primals_3,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf30)
buf31 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, buf29, reinterpret_tensor(primals_5,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf31)
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_relu_4[grid(64)](buf31, buf32, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf33 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0)
del buf24
extern_kernels.addmm(primals_8, buf29, reinterpret_tensor(primals_7,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf33)
buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sigmoid_2[grid(64)](buf30, buf34, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf35 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 32), buf34, out=buf35)
buf36 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf26, (4, 1, 4), (4, 4, 1),
0), buf32, out=buf36)
buf37 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0)
del buf35
buf38 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf39 = empty_strided_cuda((), (), torch.float32)
buf40 = buf39
del buf39
triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)](
buf37, buf40, buf36, buf33, buf38, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf41 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
triton_poi_fused_cat_9[grid(48)](primals_1, primals_2, buf38, buf40,
buf41, 48, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf42 = reinterpret_tensor(buf34, (4, 16), (16, 1), 0)
del buf34
extern_kernels.addmm(primals_4, buf41, reinterpret_tensor(primals_3,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf42)
del primals_4
buf43 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_6, buf41, reinterpret_tensor(primals_5,
(12, 16), (1, 12), 0), alpha=1, beta=1, out=buf43)
del primals_6
buf44 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_relu_4[grid(64)](buf43, buf44, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf45 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0)
del buf36
extern_kernels.addmm(primals_8, buf41, reinterpret_tensor(primals_7,
(12, 4), (1, 12), 0), alpha=1, beta=1, out=buf45)
del primals_8
buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sigmoid_2[grid(64)](buf42, buf46, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf47 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
1), 48), buf46, out=buf47)
buf48 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf38, (4, 1, 4), (4, 4, 1),
0), buf44, out=buf48)
buf49 = reinterpret_tensor(buf47, (4, 4), (4, 1), 0)
del buf47
triton_poi_fused_add_10[grid(16)](buf49, buf48, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf48
buf50 = reinterpret_tensor(buf46, (16, 4), (4, 1), 0)
del buf46
buf51 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused_stack_11[grid(64)](buf7, buf13, buf21, buf25,
buf33, buf37, buf45, buf49, buf14, buf26, buf38, buf50, buf51,
64, XBLOCK=64, num_warps=1, num_stages=1)
return (reinterpret_tensor(buf50, (4, 4, 4), (16, 4, 1), 0),
reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0), buf4, buf5,
buf6, buf7, buf13, buf14, buf16, buf17, buf18, buf19, buf20, buf21,
buf25, buf26, buf28, buf29, buf30, buf31, buf32, buf33, buf37,
buf38, buf40, buf41, buf42, buf43, buf44, buf45, buf49,
reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 48), primals_7,
primals_5, primals_3, reinterpret_tensor(primals_1, (4, 4, 1), (4,
1, 4), 32), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 16),
reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 0))
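# (Editor's note.) call() is the MC-LSTM time loop fully unrolled for the
# fixed sequence length of 4: each step issues addmm calls for the three
# gates, bmm calls for the mass-input and redistribution matmuls, and the
# fused kernels defined above; the long tail of the return tuple holds
# intermediates saved for the backward pass.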
class _Gate(nn.Module):
"""Utility class to implement a standard sigmoid gate"""
def __init__(self, in_features: 'int', out_features: 'int'):
super(_Gate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_features)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalised gate"""
return torch.sigmoid(self.fc(x))
class _NormalizedGate(nn.Module):
"""Utility class to implement a gate with normalised activation function"""
def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]',
normalizer: 'str'):
super(_NormalizedGate, self).__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_shape
[0] * out_shape[1])
self.out_shape = out_shape
if normalizer == 'normalized_sigmoid':
self.activation = nn.Sigmoid()
elif normalizer == 'normalized_relu':
self.activation = nn.ReLU()
else:
raise ValueError(
f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}"
)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalized gate"""
h = self.fc(x).view(-1, *self.out_shape)
return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1)
class _MCLSTMCellNew(nn.Module):
"""The logic of the MC-LSTM cell"""
def __init__(self, mass_input_size: 'int', aux_input_size: 'int',
hidden_size: 'int', cfg: 'Config'):
super(_MCLSTMCellNew, self).__init__()
self.cfg = cfg
self._hidden_size = hidden_size
gate_inputs = aux_input_size + hidden_size + mass_input_size
self.output_gate = _Gate(in_features=gate_inputs, out_features=
hidden_size)
self.input_gate = _NormalizedGate(in_features=gate_inputs,
out_shape=(mass_input_size, hidden_size), normalizer=
'normalized_sigmoid')
self.redistribution = _NormalizedGate(in_features=gate_inputs,
out_shape=(hidden_size, hidden_size), normalizer='normalized_relu')
self._reset_parameters()
def _reset_parameters(self):
if self.cfg.initial_forget_bias is not None:
nn.init.constant_(self.output_gate.fc.bias, val=self.cfg.
initial_forget_bias)
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1)
i = self.input_gate(features)
r = self.redistribution(features)
o = self.output_gate(features)
m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2)
m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2)
m_new = m_in + m_sys
return o * m_new, (1 - o) * m_new
def forward(self, input_0, input_1):
primals_7 = self.output_gate.fc.weight
primals_8 = self.output_gate.fc.bias
primals_3 = self.input_gate.fc.weight
primals_4 = self.input_gate.fc.bias
primals_5 = self.redistribution.fc.weight
primals_6 = self.redistribution.fc.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
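# (Editor's note.) output[0] and output[1] are the stacked outgoing-mass and
# memory-cell tensors, matching the (m_out, c) pair returned by the eager
# _MCLSTMCell.forward; the remaining entries of call()'s tuple stay internal.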
| kyleniemeyer/neuralhydrology | _MCLSTMCell | false | 3,892 | [
"BSD-3-Clause"
] | 0 | 440fda715c4f746a2d56b058b9af2f0e03c36aa0 | https://github.com/kyleniemeyer/neuralhydrology/tree/440fda715c4f746a2d56b058b9af2f0e03c36aa0 | from _paritybench_helpers import _mock_config
import torch
from typing import Tuple
import torch.nn as nn
class _Gate(nn.Module):
"""Utility class to implement a standard sigmoid gate"""
def __init__(self, in_features: 'int', out_features: 'int'):
super().__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_features)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalised gate"""
return torch.sigmoid(self.fc(x))
class _NormalizedGate(nn.Module):
"""Utility class to implement a gate with normalised activation function"""
def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]',
normalizer: 'str'):
super().__init__()
self.fc = nn.Linear(in_features=in_features, out_features=out_shape
[0] * out_shape[1])
self.out_shape = out_shape
if normalizer == 'normalized_sigmoid':
self.activation = nn.Sigmoid()
elif normalizer == 'normalized_relu':
self.activation = nn.ReLU()
else:
raise ValueError(
f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}"
)
self._reset_parameters()
def _reset_parameters(self):
nn.init.orthogonal_(self.fc.weight)
nn.init.zeros_(self.fc.bias)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Perform forward pass through the normalized gate"""
h = self.fc(x).view(-1, *self.out_shape)
return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1)
class Model(nn.Module):
"""The logic of the MC-LSTM cell"""
def __init__(self, mass_input_size: 'int', aux_input_size: 'int',
hidden_size: 'int', cfg: 'Config'):
super().__init__()
self.cfg = cfg
self._hidden_size = hidden_size
gate_inputs = aux_input_size + hidden_size + mass_input_size
self.output_gate = _Gate(in_features=gate_inputs, out_features=
hidden_size)
self.input_gate = _NormalizedGate(in_features=gate_inputs,
out_shape=(mass_input_size, hidden_size), normalizer=
'normalized_sigmoid')
self.redistribution = _NormalizedGate(in_features=gate_inputs,
out_shape=(hidden_size, hidden_size), normalizer='normalized_relu')
self._reset_parameters()
def _reset_parameters(self):
if self.cfg.initial_forget_bias is not None:
nn.init.constant_(self.output_gate.fc.bias, val=self.cfg.
initial_forget_bias)
def forward(self, x_m: 'torch.Tensor', x_a: 'torch.Tensor') ->Tuple[
torch.Tensor, torch.Tensor]:
"""Perform forward pass on the MC-LSTM cell.
Parameters
----------
x_m : torch.Tensor
Mass input that will be conserved by the network.
x_a : torch.Tensor
Auxiliary inputs that will be used to modulate the gates but whose information won't be stored internally
in the MC-LSTM cells.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Outgoing mass and memory cells per time step of shape [sequence length, batch size, hidden size]
"""
_, batch_size, _ = x_m.size()
ct = x_m.new_zeros((batch_size, self._hidden_size))
m_out, c = [], []
for xt_m, xt_a in zip(x_m, x_a):
mt_out, ct = self._step(xt_m, xt_a, ct)
m_out.append(mt_out)
c.append(ct)
m_out, c = torch.stack(m_out), torch.stack(c)
return m_out, c
def _step(self, xt_m, xt_a, c):
""" Make a single time step in the MCLSTM. """
features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1)
i = self.input
# ... truncated (>4000 chars) for memory efficiency |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.