entry_point (string, 1-65) | original_triton_python_code (string, 208-619k) | optimised_triton_code (string, 1.15k-275k) | repo_name (string, 7-115) | module_name (string, 1-65) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (sequence, 1-6) | stars (int64, 0-19.8k) | sha (string, 40) | repo_link (string, 72-180) |
---|---|---|---|---|---|---|---|---|---|---|
Fusion | from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.nn.init
class Fusion(nn.Module):
def __init__(self, opt):
super(Fusion, self).__init__()
self.f_size = opt.embed_size
self.gate0 = nn.Linear(self.f_size, self.f_size)
self.gate1 = nn.Linear(self.f_size, self.f_size)
self.fusion0 = nn.Linear(self.f_size, self.f_size)
self.fusion1 = nn.Linear(self.f_size, self.f_size)
def forward(self, vec1, vec2):
features_1 = self.gate0(vec1)
features_2 = self.gate1(vec2)
t = torch.sigmoid(self.fusion0(features_1) + self.fusion1(features_2))
f = t * features_1 + (1 - t) * features_2
return f
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(embed_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x2, xmask)
tmp12 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp7
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_7, (4, 4), (1, 4
), 0), out=buf2)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_9, (4, 4), (1, 4
), 0), out=buf3)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_0[grid(256)](buf4, primals_8,
buf3, primals_10, buf0, buf1, buf5, 256, XBLOCK=256, num_warps=
4, num_stages=1)
del buf3
del primals_10
del primals_8
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), buf1, buf4, primals_9, primals_7
class FusionNew(nn.Module):
def __init__(self, opt):
super(FusionNew, self).__init__()
self.f_size = opt.embed_size
self.gate0 = nn.Linear(self.f_size, self.f_size)
self.gate1 = nn.Linear(self.f_size, self.f_size)
self.fusion0 = nn.Linear(self.f_size, self.f_size)
self.fusion1 = nn.Linear(self.f_size, self.f_size)
def forward(self, input_0, input_1):
primals_1 = self.gate0.weight
primals_2 = self.gate0.bias
primals_4 = self.gate1.weight
primals_5 = self.gate1.bias
primals_7 = self.fusion0.weight
primals_8 = self.fusion0.bias
primals_9 = self.fusion1.weight
primals_10 = self.fusion1.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| kywen1119/DSRAN | Fusion | false | 15,869 | ["Apache-2.0"] | 56 | eb5e515c8d9e527de493f32b62469107a9d398e7 | https://github.com/kywen1119/DSRAN/tree/eb5e515c8d9e527de493f32b62469107a9d398e7 |
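A minimal parity sketch for the row above (not part of the dataset). It assumes a CUDA device, that the `Fusion` and `FusionNew` classes from this row are in scope, and it substitutes a plain `SimpleNamespace` for the `_paritybench_helpers._mock_config` helper:

```python
import torch
from types import SimpleNamespace

opt = SimpleNamespace(embed_size=4)  # stands in for _mock_config(embed_size=4)
eager = Fusion(opt).cuda()
fused = FusionNew(opt).cuda()
fused.load_state_dict(eager.state_dict())  # identical parameter names

vec1 = torch.rand(4, 4, 4, 4, device='cuda')
vec2 = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # t = sigmoid(fusion0(gate0(v1)) + fusion1(gate1(v2))); f = t*f1 + (1-t)*f2
    torch.testing.assert_close(fused(vec1, vec2), eager(vec1, vec2),
                               rtol=1e-4, atol=1e-5)
```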
pdice_loss | import torch
import torch.nn as nn
import torch.utils.model_zoo
class pdice_loss(nn.Module):
def __init__(self, batch=True):
super(pdice_loss, self).__init__()
self.batch = batch
def soft_dice_coeff(self, y_true, y_pred, p):
smooth = 0.0
if self.batch:
pmap = p.clone()
pmap[pmap >= 0.8] = 1
pmap[pmap < 0.8] = 0
y_true_th = y_true * pmap
y_pred_th = y_pred * pmap
i = torch.sum(y_true_th)
j = torch.sum(y_pred_th)
intersection = torch.sum(y_true_th * y_pred_th)
else:
i = y_true.sum(1).sum(1).sum(1)
j = y_pred.sum(1).sum(1).sum(1)
intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
score = (2.0 * intersection + smooth) / (i + j + smooth)
return score.mean()
def soft_dice_loss(self, y_true, y_pred, pmap):
loss = 1 - self.soft_dice_coeff(y_true, y_pred, pmap)
return loss
def forward(self, y_pred, y_true, pmap):
b = self.soft_dice_loss(y_true, y_pred, pmap)
return b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_index_put_lift_fresh_mean_mul_rsub_sum_0(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp8 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + r0, None)
tmp1 = 0.8
tmp2 = tmp0 >= tmp1
tmp3 = 1.0
tmp4 = tl.where(tmp2, tmp3, tmp0)
tmp5 = tmp4 < tmp1
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp6, tmp4)
tmp9 = tmp8 * tmp7
tmp11 = tmp10 * tmp7
tmp12 = tmp9 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tl.broadcast_to(tmp9, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = tl.broadcast_to(tmp11, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 2.0
tmp23 = tmp15 * tmp22
tmp24 = tmp23 + tmp6
tmp25 = tmp18 + tmp21
tmp26 = tmp25 + tmp6
tmp27 = tmp24 / tmp26
tmp28 = tmp27 / tmp3
tmp29 = tmp3 - tmp28
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf5 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_add_div_index_put_lift_fresh_mean_mul_rsub_sum_0[grid
(1)](buf5, arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf5,
class pdice_lossNew(nn.Module):
def __init__(self, batch=True):
super(pdice_lossNew, self).__init__()
self.batch = batch
def soft_dice_coeff(self, y_true, y_pred, p):
smooth = 0.0
if self.batch:
pmap = p.clone()
pmap[pmap >= 0.8] = 1
pmap[pmap < 0.8] = 0
y_true_th = y_true * pmap
y_pred_th = y_pred * pmap
i = torch.sum(y_true_th)
j = torch.sum(y_pred_th)
intersection = torch.sum(y_true_th * y_pred_th)
else:
i = y_true.sum(1).sum(1).sum(1)
j = y_pred.sum(1).sum(1).sum(1)
intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
score = (2.0 * intersection + smooth) / (i + j + smooth)
return score.mean()
def soft_dice_loss(self, y_true, y_pred, pmap):
loss = 1 - self.soft_dice_coeff(y_true, y_pred, pmap)
return loss
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| manuel-rdz/SGL-Retinal-Vessel-Segmentation | pdice_loss | false | 15,999 | ["MIT"] | 45 | 7897d977e77aa0b5d3acb86e0aa74c6829d67415 | https://github.com/manuel-rdz/SGL-Retinal-Vessel-Segmentation/tree/7897d977e77aa0b5d3acb86e0aa74c6829d67415 |
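The fused reduction kernel in this row collapses the 0.8 threshold, the three sums, and the final division into a single pass over 256 elements. A plain-PyTorch sketch of the same quantity, checked against the eager `pdice_loss` class above (assumed in scope):

```python
import torch

y_pred, y_true, p = (torch.rand(4, 4, 4, 4) for _ in range(3))
pm = (p >= 0.8).float()  # the pmap[...] = 1 / 0 assignments reduce to a threshold
ref = 1.0 - 2.0 * (y_true * pm * (y_pred * pm)).sum() / (
    (y_true * pm).sum() + (y_pred * pm).sum())
out = pdice_loss()(y_pred, y_true, p)
torch.testing.assert_close(out, ref, rtol=1e-5, atol=1e-6)
```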
Normalize | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qy/cqysf25xji52jc2mxglj4vmchyuhxdc5l4jtyhvmy3x5dkvuoaqh.py
# Topologically Sorted Source Nodes: [norm, truediv], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# norm => pow_1, pow_2, sum_1
# truediv => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [4], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {})
triton_poi_fused_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, truediv], Original ATen: [aten.linalg_vector_norm, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_0[grid(1024)](arg0_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| phuochieu212/PointGLR | Normalize | false | 16,249 | ["MIT"] | 104 | 37017b1af31486aa9d516a3762725a650dca9ad1 | https://github.com/phuochieu212/PointGLR/tree/37017b1af31486aa9d516a3762725a650dca9ad1 |
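The kernel in this row fuses the last-axis L2 norm with the division and applies no epsilon clamp, so it matches plain division rather than `F.normalize`. A parity sketch, assuming CUDA and the `NormalizeNew` class above (the compiled graph hard-codes the last axis regardless of the `dim` argument):

```python
import torch

x = torch.rand(4, 4, 4, 4, 4, device='cuda')
ref = x / x.pow(2).sum(dim=-1, keepdim=True).sqrt()
out = NormalizeNew(dim=4)(x)
torch.testing.assert_close(out, ref, rtol=1e-5, atol=1e-6)
```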
BartClassificationHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# hidden_states_2 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vz/cvzje67emefmtrrwfoiqmxlqwwubkdlwr3p3l5lnagwt3ifl22gu.py
# Topologically Sorted Source Nodes: [sent_scores_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# sent_scores_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %primals_6), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sent_scores_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf3, primals_6, buf4, 256, grid=grid(256), stream=stream0)
return (buf4, primals_6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_1[grid(256)](buf3, primals_6, buf4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf4, primals_6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf1, buf3, primals_4
class BartClassificationHeadNew(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, input_dim: 'int', inner_dim: 'int', pooler_dropout:
'float'):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, 1, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0, input_1):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_4 = self.out_proj.weight
primals_5 = self.out_proj.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| sajastu/transformers-sent-curr | BartClassificationHead | false | 4,238 | ["Apache-2.0"] | 0 | 6dc41545c4ac298a010090fbca4b454c2eaf3dbb | https://github.com/sajastu/transformers-sent-curr/tree/6dc41545c4ac298a010090fbca4b454c2eaf3dbb |
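An eager re-derivation of the fused graph in this row (dense, tanh, out_proj, sigmoid, then a broadcast multiply with the second input); dropout is a no-op in the compiled inference graph, so `pooler_dropout=0.0` keeps the comparison exact. Assumes CUDA and the `BartClassificationHeadNew` class above:

```python
import torch

head = BartClassificationHeadNew(input_dim=4, inner_dim=4,
                                 pooler_dropout=0.0).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
mask = torch.rand(4, 4, 4, 4, device='cuda')

h = torch.tanh(head.dense(x))
scores = torch.sigmoid(head.out_proj(h).squeeze(-1))  # shape (4, 4, 4)
ref = scores * mask  # scores broadcast across mask's leading dim
torch.testing.assert_close(head(x, mask), ref, rtol=1e-4, atol=1e-5)
```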
Mask | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Mask(nn.Module):
def forward(self, seq, mask):
seq_mask = torch.unsqueeze(mask, 2)
seq_mask = torch.transpose(seq_mask.repeat(1, 1, seq.size()[1]), 1, 2)
return seq.where(torch.eq(seq_mask, 1), torch.zeros_like(seq))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_eq_where_zeros_like_0(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y1 = yindex // 4
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (x2 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp5, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_eq_where_zeros_like_0[grid(16, 4)](arg0_1, arg1_1,
buf0, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class MaskNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| HarshCasper/nni | Mask | false | 5,273 | ["MIT"] | 1 | 291bbbba9f296382015a77b2c88eb5db5b44bf94 | https://github.com/HarshCasper/nni/tree/291bbbba9f296382015a77b2c88eb5db5b44bf94 |
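For reference, the eager semantics of this row: entry `(b, i, j)` of the output keeps `seq[i, j]` where `mask[b, j] == 1` and is zero elsewhere. A sketch against the eager `Mask` class above; a 0/1 mask makes the effect visible, since a uniform-random mask almost never equals 1 exactly:

```python
import torch

seq = torch.rand(4, 4)
mask = (torch.rand(4, 4) > 0.5).float()
out = Mask()(seq, mask)                   # shape (4, 4, 4)
ref = torch.where(mask[:, None, :] == 1,  # condition: mask[b, j]
                  seq[None, :, :],        # value:     seq[i, j]
                  torch.zeros_like(seq))
torch.testing.assert_close(out, ref)
```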
Ranking | import torch
class Ranking(torch.nn.Module):
def __init__(self, delta, use_cosine_similarity):
super(Ranking, self).__init__()
self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
self.measure_similarity = self._get_similarity_function(
use_cosine_similarity)
self.delta = delta
self.criterion = torch.nn.MSELoss(reduction='sum')
if not use_cosine_similarity:
dim = 64
self.projector = torch.nn.Linear(dim, dim, bias=False)
def _get_similarity_function(self, use_cosine_similarity):
if use_cosine_similarity:
self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
return self._cosine_simililarity
else:
return self._metrics_similarity
def _metrics_similarity(self, x, y):
return torch.sum(torch.square(self.projector(x) - self.projector(y)
), dim=1)
def _cosine_simililarity(self, x, y):
v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))
return v
def forward(self, zis, zjs, z_anchor):
"""
:param zis: similar to anchor
:param zjs: dissimilar to anchor
:param z_anchor: anchor image
        :return: the summed squared clamped-margin loss (a scalar tensor)
"""
s1 = self.measure_similarity(zis, z_anchor)
s2 = self.measure_similarity(zjs, z_anchor)
margin = torch.clamp(s2 - s1 + self.delta, min=0, max=1.0)
loss = self.criterion(margin, torch.zeros_like(margin))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'delta': 4, 'use_cosine_similarity': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 256
x4 = xindex % 64
x1 = xindex // 4 % 16
x5 = xindex % 256
x6 = xindex // 4 % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (4 * x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1 + 64 * x3), xmask, eviction_policy
='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1 + 64 * x3), xmask, eviction_policy
='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1 + 64 * x3), xmask, eviction_policy
='evict_last')
tmp16 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + 4 * x6, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (1 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr1 + (2 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + (3 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr2 + (x4 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp33 = tl.load(in_ptr2 + (4 * x1 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr2 + (1 + 4 * x1 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (2 + 4 * x1 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp41 = tl.load(in_ptr2 + (3 + 4 * x1 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tmp34 = tmp33 * tmp33
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp42 = tmp41 * tmp41
tmp43 = tmp40 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tmp45 = triton_helpers.maximum(tmp44, tmp13)
tmp46 = tmp32 / tmp45
tmp47 = tmp46 * tmp30
tl.store(out_ptr0 + x7, tmp31, xmask)
tl.store(out_ptr1 + x7, tmp47, xmask)
@triton.jit
def triton_per_fused_add_clamp_mse_loss_sub_sum_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp6 - tmp13
tmp15 = 4.0
tmp16 = tmp14 + tmp15
tmp17 = 0.0
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = 1.0
tmp20 = triton_helpers.minimum(tmp18, tmp19)
tmp21 = tmp20 * tmp20
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(1024)](
arg2_1, arg1_1, arg0_1, buf0, buf1, 1024, XBLOCK=128, num_warps
=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_clamp_mse_loss_sub_sum_1[grid(1)](buf0, buf1,
buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf1
return buf2,
class RankingNew(torch.nn.Module):
def __init__(self, delta, use_cosine_similarity):
super(RankingNew, self).__init__()
self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
self.measure_similarity = self._get_similarity_function(
use_cosine_similarity)
self.delta = delta
self.criterion = torch.nn.MSELoss(reduction='sum')
if not use_cosine_similarity:
dim = 64
self.projector = torch.nn.Linear(dim, dim, bias=False)
def _get_similarity_function(self, use_cosine_similarity):
if use_cosine_similarity:
self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
return self._cosine_simililarity
else:
return self._metrics_similarity
def _metrics_similarity(self, x, y):
return torch.sum(torch.square(self.projector(x) - self.projector(y)
), dim=1)
def _cosine_simililarity(self, x, y):
v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))
return v
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| alexcapstick/minder_utils | Ranking | false | 3,084 | ["MIT"] | 0 | 3bb9380b7796b5dd5b995ce1839ea6a94321021d | https://github.com/alexcapstick/minder_utils/tree/3bb9380b7796b5dd5b995ce1839ea6a94321021d |
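The two kernels in this row fuse, respectively, the broadcasted cosine similarities and the clamped-margin MSE reduction. An eager sketch using the `Ranking` class above; note that with `delta=4` and cosine values in [-1, 1] the margin always clamps to 1, so the loss is constant on these toy inputs:

```python
import torch

loss_fn = Ranking(delta=4, use_cosine_similarity=True)
zis, zjs, za = (torch.rand(4, 4, 4, 4) for _ in range(3))
cos = torch.nn.CosineSimilarity(dim=-1)
s1 = cos(zis.unsqueeze(1), za.unsqueeze(0))  # (4, 4, 4, 4) similarities
s2 = cos(zjs.unsqueeze(1), za.unsqueeze(0))
margin = torch.clamp(s2 - s1 + 4, min=0, max=1.0)
ref = (margin ** 2).sum()  # MSELoss(margin, zeros, reduction='sum')
torch.testing.assert_close(loss_fn(zis, zjs, za), ref)
```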
DataProcessor | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/uy/cuycdscoudbvmeflowbbzaw3yf3wik6o5e6vrmzupqgeqznvnalm.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._adaptive_avg_pool2d]
# Source node to ATen node mapping:
# x => _adaptive_avg_pool2d
# Graph fragment:
# %_adaptive_avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [7, 7]), kwargs = {})
triton_poi_fused__adaptive_avg_pool2d_0 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 196
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 7) % 7
x0 = xindex % 7
x2 = (xindex // 49)
x4 = xindex
tmp0 = ((4*x1) // 7)
tmp1 = ((10 + (4*x1)) // 7)
tmp2 = tmp0 < tmp1
tmp3 = ((4*x0) // 7)
tmp4 = ((10 + (4*x0)) // 7)
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + ((4*((4*x1) // 7)) + (16*x2) + ((4*x0) // 7)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + ((4*x0) // 7)
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + (4*((4*x1) // 7)) + (16*x2) + ((4*x0) // 7)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + ((4*x1) // 7)
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + (4*((4*x1) // 7)) + (16*x2) + ((4*x0) // 7)), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + (4*((4*x1) // 7)) + (16*x2) + ((4*x0) // 7)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr0 + (x4), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 7, 7), (49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._adaptive_avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused__adaptive_avg_pool2d_0.run(arg0_1, buf0, 196, grid=grid(196), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (49, 4), (1, 49), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 196
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = 4 * x1 // 7
tmp1 = (10 + 4 * x1) // 7
tmp2 = tmp0 < tmp1
tmp3 = 4 * x0 // 7
tmp4 = (10 + 4 * x0) // 7
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (4 * (4 * x1 // 7) + 16 * x2 + 4 * x0 // 7),
tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + 4 * x0 // 7
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + 4 * (4 * x1 // 7) + 16 * x2 + 4 * x0 //
7), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + 4 * x1 // 7
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + 4 * (4 * x1 // 7) + 16 * x2 + 4 * x0 //
7), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + 4 * (4 * x1 // 7) + 16 * x2 + 4 * x0 //
7), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr0 + x4, tmp30, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 7, 7), (49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__adaptive_avg_pool2d_0[grid(196)](arg0_1, buf0,
196, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (49, 4), (1, 49), 0),
class DataProcessorNew(nn.Module):
def __init__(self):
super(DataProcessorNew, self).__init__()
self.pool = nn.AdaptiveAvgPool2d((7, 7))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| jianqingxie/RSTNet | DataProcessor | false | 15,684 | ["BSD-3-Clause"] | 68 | aaa7b5be08e5ec9e79e14ed3e6a04fc3d50483be | https://github.com/jianqingxie/RSTNet/tree/aaa7b5be08e5ec9e79e14ed3e6a04fc3d50483be |
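An eager parity sketch for the pooling kernel in this row; the wrapper returns the (4, 7, 7) result reinterpreted as a transposed (49, 4) view. Assumes CUDA and the `DataProcessorNew` class above:

```python
import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, device='cuda')
ref = F.adaptive_avg_pool2d(x, (7, 7)).reshape(4, 49).t()
out = DataProcessorNew()(x)
torch.testing.assert_close(out, ref, rtol=1e-5, atol=1e-6)
```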
EqualConvTranspose2d | import math
import torch
import torch.nn.functional as F
from torch import nn
class EqualConvTranspose2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(in_channel, out_channel,
kernel_size, kernel_size))
self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
self.stride = stride
self.padding = padding
if bias:
self.bias = nn.Parameter(torch.zeros(out_channel))
else:
self.bias = None
def forward(self, input):
out = F.conv_transpose2d(input, self.weight * self.scale, bias=self
.bias, stride=self.stride, padding=self.padding)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[0]}, {self.weight.shape[1]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 7, 7), (196, 49, 7, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(784)](buf2, primals_2, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, primals_3, buf0
class EqualConvTranspose2dNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, stride=1,
padding=0, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(in_channel, out_channel,
kernel_size, kernel_size))
self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
self.stride = stride
self.padding = padding
if bias:
self.bias = nn.Parameter(torch.zeros(out_channel))
else:
self.bias = None
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[0]}, {self.weight.shape[1]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| PeterouZh/CIPS-3D | EqualConvTranspose2d | false | 14,169 | ["MIT"] | 308 | 9b8bfa0fb23f642af042e150ccd70408f9d137c6 | https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6 |
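A parity sketch for this row; the constant `0.125` baked into the first kernel is the equalized-learning-rate scale `1 / sqrt(in_channel * kernel_size ** 2) = 1 / sqrt(4 * 16)`. Assumes CUDA and both classes from this row:

```python
import torch

eager = EqualConvTranspose2d(4, 4, 4).cuda()
fused = EqualConvTranspose2dNew(4, 4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # weight and bias carry over

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-5)
```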
BatchNorm | import torch
import logging
import numpy as np
from torch import tensor
import torch.nn as nn
import numpy.random as rng
class BaseFlow(nn.Module):
""" """
def __init__(self, n_inputs, **kwargs):
super(BaseFlow, self).__init__()
self.n_inputs = n_inputs
def forward(self, x, **kwargs):
raise NotImplementedError
def generate_samples(self, n_samples=1, u=None, **kwargs):
raise NotImplementedError
def log_likelihood(self, x, **kwargs):
""" Calculates log p(x) with a Gaussian base density """
u, logdet_dudx = self.forward(x, **kwargs)
constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))
log_likelihood = constant - 0.5 * torch.sum(u ** 2, dim=1
) + logdet_dudx
return u, log_likelihood
def log_likelihood_and_score(self, x, **kwargs):
""" Calculates log p(x) and t(x) with a Gaussian base density """
u, log_likelihood = self.log_likelihood(x, **kwargs)
return u, log_likelihood, None
class BatchNorm(BaseFlow):
"""BatchNorm implementation"""
def __init__(self, n_inputs, alpha=0.1, eps=1e-05):
super(BatchNorm, self).__init__(n_inputs)
self.n_inputs = n_inputs
self.alpha = alpha
self.eps = eps
self.calculated_running_mean = False
self.running_mean = torch.zeros(self.n_inputs)
self.running_var = torch.zeros(self.n_inputs)
def forward(self, x, fixed_params=False):
"""Calculates x -> u(x) (batch norming)"""
if fixed_params:
mean = self.running_mean
var = self.running_var
else:
mean = torch.mean(x, dim=0)
var = torch.mean((x - mean) ** 2, dim=0) + self.eps
if not self.calculated_running_mean:
self.running_mean = mean
self.running_var = var
else:
self.running_mean = (1.0 - self.alpha
) * self.running_mean + self.alpha * mean
self.running_var = (1.0 - self.alpha
) * self.running_var + self.alpha * var
self.calculated_running_mean = True
u = (x - mean) / torch.sqrt(var)
logdet = -0.5 * torch.sum(torch.log(var))
return u, logdet
def inverse(self, u):
"""Calculates u -> x(u) (the approximate inverse transformation based on running mean and variance)"""
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
def generate_samples(self, n_samples=1, u=None, **kwargs):
if u is None:
u = tensor(rng.randn(n_samples, self.n_inputs))
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
    def to(self, *args, **kwargs):
        logging.getLogger(__name__).debug('Transforming BatchNorm to %s', args)
        self = super().to(*args, **kwargs)
        # running_mean/var are plain tensors, not registered buffers,
        # so they must be moved by hand
        self.running_mean = self.running_mean.to(*args, **kwargs)
        self.running_var = self.running_var.to(*args, **kwargs)
        return self
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_inputs': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import logging
import numpy as np
from torch import tensor
import torch.nn as nn
import numpy.random as rng
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = -0.5
tmp28 = tmp26 * tmp27
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp28, None)
@triton.jit
def triton_poi_fused_div_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = libdevice.sqrt(tmp3)
tmp5 = tmp2 / tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
get_raw_stream(0)
triton_per_fused_add_log_mean_mul_pow_sub_sum_0[grid(1)](buf4,
arg0_1, buf0, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_sqrt_sub_1[grid(256)](arg0_1, buf0, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf2, buf4, buf1, buf0
class BaseFlow(nn.Module):
""" """
def __init__(self, n_inputs, **kwargs):
super(BaseFlow, self).__init__()
self.n_inputs = n_inputs
def forward(self, x, **kwargs):
raise NotImplementedError
def generate_samples(self, n_samples=1, u=None, **kwargs):
raise NotImplementedError
def log_likelihood(self, x, **kwargs):
""" Calculates log p(x) with a Gaussian base density """
u, logdet_dudx = self.forward(x, **kwargs)
constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))
log_likelihood = constant - 0.5 * torch.sum(u ** 2, dim=1
) + logdet_dudx
return u, log_likelihood
def log_likelihood_and_score(self, x, **kwargs):
""" Calculates log p(x) and t(x) with a Gaussian base density """
u, log_likelihood = self.log_likelihood(x, **kwargs)
return u, log_likelihood, None
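# Note (explanatory, not in the source): log_likelihood applies the change of
# variables formula for a standard-normal base density,
#   log p(x) = -n/2 * log(2*pi) - 0.5 * ||u(x)||^2 + log|det du/dx|,
# where `constant` is the first term and `logdet_dudx` the last.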
class BatchNormNew(BaseFlow):
"""BatchNorm implementation"""
def __init__(self, n_inputs, alpha=0.1, eps=1e-05):
super(BatchNormNew, self).__init__(n_inputs)
self.n_inputs = n_inputs
self.alpha = alpha
self.eps = eps
self.calculated_running_mean = False
self.running_mean = torch.zeros(self.n_inputs)
self.running_var = torch.zeros(self.n_inputs)
def inverse(self, u):
"""Calculates u -> x(u) (the approximate inverse transformation based on running mean and variance)"""
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
def generate_samples(self, n_samples=1, u=None, **kwargs):
if u is None:
u = tensor(rng.randn(n_samples, self.n_inputs))
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
def to(self, *args, **kwargs):
logger.debug('Transforming BatchNorm to %s', args)
        self = super().to(*args, **kwargs)
        self.running_mean = self.running_mean.to(*args, **kwargs)
        self.running_var = self.running_var.to(*args, **kwargs)
return self
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| dlvp/madminer | BatchNorm | false | 1,848 | [
"MIT"
] | 0 | 4ae7d9b73452848a6c9d1b81b50ef316ff7a054f | https://github.com/dlvp/madminer/tree/4ae7d9b73452848a6c9d1b81b50ef316ff7a054f |
Tanh | import math
import torch
class Tanh(torch.nn.Tanh):
"""
Class that extends ``torch.nn.Tanh`` additionally computing the log diagonal
blocks of the Jacobian.
"""
def forward(self, inputs, grad: 'torch.Tensor'=None):
"""
Parameters
----------
inputs : ``torch.Tensor``, required.
The input tensor.
grad : ``torch.Tensor``, optional (default = None).
The log diagonal blocks of the partial Jacobian of previous transformations.
Returns
-------
The output tensor and the log diagonal blocks of the partial log-Jacobian of previous
transformations combined with this transformation.
"""
g = -2 * (inputs - math.log(2) + torch.nn.functional.softplus(-2 *
inputs))
return torch.tanh(inputs), g.view(grad.shape
) + grad if grad is not None else g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
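# Explanatory sketch (not in the original source): g above is the log of the
# tanh derivative, via the identity
#   log(d tanh(x)/dx) = log(1 - tanh(x)^2)
#                     = 2*log(2) - 2*x - 2*softplus(-2*x)
#                     = -2 * (x - log(2) + softplus(-2*x)).
def _check_tanh_log_jacobian():
    x = torch.linspace(-3.0, 3.0, 16)
    g = -2 * (x - math.log(2) + torch.nn.functional.softplus(-2 * x))
    assert torch.allclose(g, torch.log(1 - torch.tanh(x) ** 2), atol=1e-06)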
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_softplus_sub_tanh_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 0.6931471805599453
tmp3 = tmp0 - tmp2
tmp4 = -2.0
tmp5 = tmp0 * tmp4
tmp6 = 20.0
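    # Stable softplus: log1p(exp(t)) overflows for large t, so when t > 20 the
    # kernel returns t itself (there log1p(exp(t)) ~= t to float precision).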
tmp7 = tmp5 > tmp6
tmp8 = tl_math.exp(tmp5)
tmp9 = libdevice.log1p(tmp8)
tmp10 = tl.where(tmp7, tmp5, tmp9)
tmp11 = tmp3 + tmp10
tmp12 = tmp11 * tmp4
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_softplus_sub_tanh_0[grid(256)](arg0_1,
buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0, buf1
class TanhNew(torch.nn.Tanh):
"""
Class that extends ``torch.nn.Tanh`` additionally computing the log diagonal
blocks of the Jacobian.
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| gndctrl2mjrtm/BNAF | Tanh | false | 12,606 | [
"MIT"
] | 0 | a8ecaa2844b5338f9091e58dd571fdc6a598e2f1 | https://github.com/gndctrl2mjrtm/BNAF/tree/a8ecaa2844b5338f9091e58dd571fdc6a598e2f1 |
BertAttention | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class BertSelfAttention(nn.Module):
"""
    Self-attention layer.
    The underlying idea is explained in this blog post: http://jalammar.github.io/illustrated-transformer/
"""
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
"""
    Implements self-attention + Add & Norm.
"""
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5,
layer_norm_eps=1)}]
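# Usage sketch (illustrative, not from the source): with the mock config above,
# transpose_for_scores reshapes (batch, seq, hidden) -> (batch, heads, seq,
# head_size), so each of the 4 heads here attends with head_size 1, and the
# attention output keeps the input shape.
def _demo_bert_attention():
    config = _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5,
        layer_norm_eps=1)
    attention = BertAttention(config)
    out = attention(torch.rand([4, 4, 4]), torch.rand([4, 4, 4]))
    assert out.shape == (4, 4, 4)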
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float('-inf')
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp29 != 0
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 != 0
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38 != 0
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
tl.store(out_ptr2 + x2, tmp45, xmask)
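# Note (explanatory): the kernel above computes, per 4-wide softmax row, the
# running max (tmp14), the sum of exp(x - max) (tmp25), and a flag (tmp45) that
# is True iff any entry is finite; triton_poi_fused_2 below uses that flag to
# write 0.0 for fully masked (all -inf) rows instead of producing NaNs.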
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_out_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf13, primals_3,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf13, primals_3,
buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf14
del buf15
del primals_12
return buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9
class BertSelfAttention(nn.Module):
"""
    Self-attention layer.
    The underlying idea is explained in this blog post: http://jalammar.github.io/illustrated-transformer/
"""
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttentionNew(nn.Module):
"""
    Implements self-attention + Add & Norm.
"""
def __init__(self, config):
super(BertAttentionNew, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_0, input_1):
primals_1 = self.self.query.weight
primals_2 = self.self.query.bias
primals_4 = self.self.key.weight
primals_5 = self.self.key.bias
primals_6 = self.self.value.weight
primals_7 = self.self.value.bias
primals_9 = self.output.dense.weight
primals_10 = self.output.dense.bias
primals_11 = self.output.LayerNorm.weight
primals_12 = self.output.LayerNorm.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| techthiyanes/nlp-notebook | BertAttention | false | 16,582 | [
"MIT"
] | 136 | 0e5f4b75e635128d4056c89a6c65bea60c15e836 | https://github.com/techthiyanes/nlp-notebook/tree/0e5f4b75e635128d4056c89a6c65bea60c15e836 |
DirichletPolicySingleLayer | import torch
import numpy as np
import torch.nn.functional as F
import torch.distributions as td
import torch.nn as nn
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class DirichletPolicyBase(PolicyNetwork):
"""
Base class for Dirichlet policies.
Desired network needs to be implemented.
"""
def __init__(self, min_alpha=-np.inf, max_alpha=np.inf):
super().__init__()
self.min_alpha = min_alpha
self.max_alpha = max_alpha
def sample(self, state, no_log_prob=False):
alpha = self.forward(state)
dist = td.Dirichlet(alpha)
action = dist.sample()
return action if no_log_prob else (action, dist.log_prob(action))
class DirichletPolicySingleLayer(DirichletPolicyBase):
"""Working, single-layer Dirichlet policy network."""
def __init__(self, state_dim, action_dim, hidden_layer_size=256,
min_alpha=-np.inf, max_alpha=np.inf):
super().__init__(min_alpha, max_alpha)
self.linear1 = nn.Linear(state_dim, hidden_layer_size)
self.linear2 = nn.Linear(hidden_layer_size, action_dim)
nn.init.normal_(self.linear1.weight, std=0.001)
nn.init.normal_(self.linear1.bias, std=0.001)
nn.init.normal_(self.linear2.weight, std=0.001)
nn.init.normal_(self.linear2.bias, std=0.001)
def forward(self, state):
x = F.relu(self.linear1(state))
action = self.max_alpha * F.sigmoid(self.linear2(x))
return torch.clamp(action, self.min_alpha, self.max_alpha)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
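# Usage sketch (illustrative, not from the source): finite alpha bounds are
# assumed here because td.Dirichlet needs strictly positive concentrations
# (the defaults above are -inf/+inf). Sampled actions lie on the simplex.
def _demo_dirichlet_sample():
    policy = DirichletPolicySingleLayer(state_dim=4, action_dim=4,
        min_alpha=0.1, max_alpha=10.0)
    action, log_prob = policy.sample(torch.rand(4))
    assert torch.isclose(action.sum(), torch.tensor(1.0))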
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.distributions as td
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_clamp_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = float('inf')
tmp3 = tmp1 * tmp2
tmp4 = float('-inf')
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = triton_helpers.minimum(tmp5, tmp2)
tl.store(out_ptr0 + x0, tmp6, xmask)
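# Note (explanatory): because get_init_inputs leaves min_alpha/max_alpha at
# their -inf/+inf defaults, the compiler has inlined float('inf') into the
# sigmoid * max_alpha and clamp chain above; finite bounds would be inlined
# as ordinary constants instead.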
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf4, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_mul_sigmoid_1[grid(256)](buf2, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), buf2, primals_4, buf4
class PolicyNetwork(nn.Module):
"""Base class for stochastic policy networks."""
def __init__(self):
super().__init__()
def forward(self, state):
"""Take state as input, then output the parameters of the policy."""
raise NotImplementedError('forward not implemented.')
def sample(self, state):
"""
Sample an action based on the model parameters given the current state.
"""
raise NotImplementedError('sample not implemented.')
class DirichletPolicyBase(PolicyNetwork):
"""
Base class for Dirichlet policies.
Desired network needs to be implemented.
"""
def __init__(self, min_alpha=-np.inf, max_alpha=np.inf):
super().__init__()
self.min_alpha = min_alpha
self.max_alpha = max_alpha
def sample(self, state, no_log_prob=False):
alpha = self.forward(state)
dist = td.Dirichlet(alpha)
action = dist.sample()
return action if no_log_prob else (action, dist.log_prob(action))
class DirichletPolicySingleLayerNew(DirichletPolicyBase):
"""Working, single-layer Dirichlet policy network."""
def __init__(self, state_dim, action_dim, hidden_layer_size=256,
min_alpha=-np.inf, max_alpha=np.inf):
super().__init__(min_alpha, max_alpha)
self.linear1 = nn.Linear(state_dim, hidden_layer_size)
self.linear2 = nn.Linear(hidden_layer_size, action_dim)
nn.init.normal_(self.linear1.weight, std=0.001)
nn.init.normal_(self.linear1.bias, std=0.001)
nn.init.normal_(self.linear2.weight, std=0.001)
nn.init.normal_(self.linear2.bias, std=0.001)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| wessle/costaware | DirichletPolicySingleLayer | false | 11,004 | [
"MIT"
] | 0 | 151502308411528eaa703d353d138fc809e59d8e | https://github.com/wessle/costaware/tree/151502308411528eaa703d353d138fc809e59d8e |
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lj/cljcklk63iibdlae2mmlzlhvlmgpeclc6v7pcumo3oyly4zqpua7.py
# Topologically Sorted Source Nodes: [pt, sub, pow_1, mul, mul_1, log, mul_2, pow_2, mul_3, sub_1, mul_4, sub_2, log_1, mul_5, loss, mean], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.log, aten.sub, aten.mean]
# Source node to ATen node mapping:
# log => log
# log_1 => log_1
# loss => sub_3
# mean => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# pow_1 => pow_1
# pow_2 => pow_2
# pt => sigmoid
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg1_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 0.5), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %log_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {})
triton_per_fused_log_mean_mul_pow_rsub_sigmoid_sub_0 = async_compile.triton('triton_per_fused_log_mean_mul_pow_rsub_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log_mean_mul_pow_rsub_sigmoid_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_log_mean_mul_pow_rsub_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp7 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp3 * tmp3
tmp5 = -0.5
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp9 = tl_math.log(tmp1)
tmp10 = tmp8 * tmp9
tmp11 = tmp1 * tmp1
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp2 - tmp7
tmp15 = tmp13 * tmp14
tmp16 = tl_math.log(tmp3)
tmp17 = tmp15 * tmp16
tmp18 = tmp10 - tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pt, sub, pow_1, mul, mul_1, log, mul_2, pow_2, mul_3, sub_1, mul_4, sub_2, log_1, mul_5, loss, mean], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.log, aten.sub, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_log_mean_mul_pow_rsub_sigmoid_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
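# Reference sketch (illustrative, not from the source): the fused kernel above
# evaluates the binary focal loss with gamma=2 and alpha=0.5 baked in as the
# -0.5 / 0.5 constants and the explicit squaring:
#   mean(-alpha*(1-p)^gamma * y*log(p) - (1-alpha)*p^gamma * (1-y)*log(1-p)),
# with p = sigmoid(logits).
def _reference_focal_loss(logits, targets, gamma=2.0, alpha=0.5):
    p = torch.sigmoid(logits)
    loss = -alpha * (1 - p) ** gamma * targets * torch.log(p) - (1 - alpha
        ) * p ** gamma * (1 - targets) * torch.log(1 - p)
    return loss.mean()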
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_log_mean_mul_pow_rsub_sigmoid_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp7 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp3 * tmp3
tmp5 = -0.5
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp9 = tl_math.log(tmp1)
tmp10 = tmp8 * tmp9
tmp11 = tmp1 * tmp1
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp2 - tmp7
tmp15 = tmp13 * tmp14
tmp16 = tl_math.log(tmp3)
tmp17 = tmp15 * tmp16
tmp18 = tmp10 - tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_log_mean_mul_pow_rsub_sigmoid_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class FocalLossNew(torch.nn.Module):
def __init__(self, gamma=2, alpha=0.5, size_average=True):
super(FocalLossNew, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.size_average = size_average
self.elipson = 1e-06
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| RuiBai1999/HiMatch | FocalLoss | false | 5,771 | [
"MIT"
] | 1 | 199ebc6b06b3cce2b3f2298cb9e20f81c01dc7a6 | https://github.com/RuiBai1999/HiMatch/tree/199ebc6b06b3cce2b3f2298cb9e20f81c01dc7a6 |
EncoderSteenkiste | import torch
from torch import nn
class EncoderSteenkiste(nn.Module):
def __init__(self, signal_size, latent_dim=10):
"""
Parameters
----------
        signal_size : sequence of ints
            Shape of the input signal; signal_size[2] gives the signal length.
latent_dim : int
Dimensionality of latent output.
        Model Architecture
        ------------
        - 3 fully connected layers (50, 20 and latent_dim units)
        - Latent distribution:
            - 1 fully connected layer of 2 * latent_dim units (log variance and mean for latent_dim Gaussians)
References:
[1] Burgess, Christopher P., et al. "Understanding disentangling in
$\\beta$-VAE." arXiv preprint arXiv:1804.03599 (2018).
"""
super(EncoderSteenkiste, self).__init__()
hidden_dim1 = 50
hidden_dim2 = 20
self.latent_dim = latent_dim
self.img_size = signal_size
signal_length = signal_size[2]
self.lin1 = nn.Linear(signal_length, hidden_dim1)
self.lin2 = nn.Linear(hidden_dim1, hidden_dim2)
self.lin3 = nn.Linear(hidden_dim2, latent_dim)
self.mu_logvar_gen = nn.Linear(latent_dim, self.latent_dim * 2)
def forward(self, x):
x = torch.relu(self.lin1(x))
x = torch.relu(self.lin2(x))
x = torch.relu(self.lin3(x))
mu_logvar = self.mu_logvar_gen(x)
mu, logvar = mu_logvar.view(-1, self.latent_dim, 2).unbind(-1)
return mu, logvar
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'signal_size': [4, 4, 4]}]
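# Explanatory sketch (not from the source): because mu_logvar is reshaped to
# (-1, latent_dim, 2) before unbind(-1), mu and logvar are interleaved along
# the last axis of mu_logvar_gen's output (even indices -> mu, odd -> logvar);
# the stride-2 reinterpret_tensor views in the compiled code below rely on this.
def _check_mu_logvar_split():
    encoder = EncoderSteenkiste(signal_size=[4, 4, 4])
    mu, logvar = encoder(torch.rand([4, 4, 4, 4]))
    assert mu.shape == logvar.shape == (64, encoder.latent_dim)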
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (20, 50), (50, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (10, 20), (20, 1))
assert_size_stride(primals_7, (10,), (1,))
assert_size_stride(primals_8, (20, 10), (10, 1))
assert_size_stride(primals_9, (20,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1,
primals_2, buf9, 3200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0),
reinterpret_tensor(primals_4, (50, 20), (1, 50), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 20), (320, 80, 20, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(1280)](buf3,
primals_5, buf8, 1280, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 20), (20, 1), 0),
reinterpret_tensor(primals_6, (20, 10), (1, 20), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 10), (160, 40, 10, 1), 0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(640)](buf5,
primals_7, buf7, 640, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 10),
(10, 1), 0), reinterpret_tensor(primals_8, (10, 20), (1, 10), 0
), alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (64, 10), (20, 2), 0), reinterpret_tensor(
buf6, (64, 10), (20, 2), 1), reinterpret_tensor(primals_3, (64, 4),
(4, 1), 0), reinterpret_tensor(buf1, (64, 50), (50, 1), 0
), reinterpret_tensor(buf3, (64, 20), (20, 1), 0), reinterpret_tensor(
buf5, (64, 10), (10, 1), 0
), primals_8, buf7, primals_6, buf8, primals_4, buf9
class EncoderSteenkisteNew(nn.Module):
def __init__(self, signal_size, latent_dim=10):
"""
Parameters
----------
        signal_size : sequence of ints
            Shape of the input signal; signal_size[2] gives the signal length.
latent_dim : int
Dimensionality of latent output.
        Model Architecture
        ------------
        - 3 fully connected layers (50, 20 and latent_dim units)
        - Latent distribution:
            - 1 fully connected layer of 2 * latent_dim units (log variance and mean for latent_dim Gaussians)
References:
[1] Burgess, Christopher P., et al. "Understanding disentangling in
$\\beta$-VAE." arXiv preprint arXiv:1804.03599 (2018).
"""
super(EncoderSteenkisteNew, self).__init__()
hidden_dim1 = 50
hidden_dim2 = 20
self.latent_dim = latent_dim
self.img_size = signal_size
signal_length = signal_size[2]
self.lin1 = nn.Linear(signal_length, hidden_dim1)
self.lin2 = nn.Linear(hidden_dim1, hidden_dim2)
self.lin3 = nn.Linear(hidden_dim2, latent_dim)
self.mu_logvar_gen = nn.Linear(latent_dim, self.latent_dim * 2)
def forward(self, input_0):
primals_1 = self.lin1.weight
primals_2 = self.lin1.bias
primals_4 = self.lin2.weight
primals_5 = self.lin2.bias
primals_6 = self.lin3.weight
primals_7 = self.lin3.bias
primals_8 = self.mu_logvar_gen.weight
primals_9 = self.mu_logvar_gen.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| jnsrch/disentangling-vae-cwt | EncoderSteenkiste | false | 15,720 | [
"MIT"
] | 581 | 0e927bdcd3d149cadb30aa107331f0c071138c41 | https://github.com/jnsrch/disentangling-vae-cwt/tree/0e927bdcd3d149cadb30aa107331f0c071138c41 |
FilterResponseNorm_layer | import torch
import torch.nn as nn
class FilterResponseNorm_layer(nn.Module):
def __init__(self, num_filters, eps=1e-06):
super(FilterResponseNorm_layer, self).__init__()
self.eps = eps
par_shape = 1, num_filters, 1, 1
self.tau = torch.nn.Parameter(torch.zeros(par_shape))
self.beta = torch.nn.Parameter(torch.zeros(par_shape))
self.gamma = torch.nn.Parameter(torch.ones(par_shape))
def forward(self, x):
nu2 = torch.mean(torch.square(x), dim=[2, 3], keepdim=True)
x = x * 1 / torch.sqrt(nu2 + self.eps)
y = self.gamma * x + self.beta
z = torch.max(y, self.tau)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_filters': 4}]
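# Explanatory sketch (not from the source): this is Filter Response
# Normalization with a Thresholded Linear Unit. Each channel is divided by
# sqrt(mean(x^2) over H and W), scaled and shifted by gamma/beta, and the
# final max with the learned tau clips activations from below.
def _check_frn():
    frn = FilterResponseNorm_layer(num_filters=4)
    z = frn(torch.rand([4, 4, 4, 4]))
    assert z.shape == (4, 4, 4, 4)
    assert torch.all(z >= frn.tau)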
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_maximum_mean_mul_pow_sqrt_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp11 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp12 = 1.0
tmp13 = tmp0 * tmp12
tmp14 = tmp13 / tmp10
tmp15 = tmp11 * tmp14
tmp17 = tmp15 + tmp16
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_maximum_mean_mul_pow_sqrt_0[grid(16)](buf1,
primals_1, primals_2, primals_3, primals_4, buf2, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
return buf2, primals_1, primals_2, primals_3, primals_4, buf1
class FilterResponseNorm_layerNew(nn.Module):
def __init__(self, num_filters, eps=1e-06):
super(FilterResponseNorm_layerNew, self).__init__()
self.eps = eps
par_shape = 1, num_filters, 1, 1
self.tau = torch.nn.Parameter(torch.zeros(par_shape))
self.beta = torch.nn.Parameter(torch.zeros(par_shape))
self.gamma = torch.nn.Parameter(torch.ones(par_shape))
def forward(self, input_0):
primals_2 = self.tau
primals_3 = self.beta
primals_4 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| deebuls/pytorch-cifar | FilterResponseNorm_layer | false | 1,815 | [
"MIT"
] | 0 | c6d9b16eeb00418d8c4f4f4c1e97f141c1f7d198 | https://github.com/deebuls/pytorch-cifar/tree/c6d9b16eeb00418d8c4f4f4c1e97f141c1f7d198 |
DisentangledAELatent | import torch
class DisentangledAELatent(torch.nn.Module):
"""Dense Dientangled Latent Layer between encoder and decoder"""
def __init__(self, hidden_size: 'int', latent_size: 'int', dropout: 'float'
):
super(DisentangledAELatent, self).__init__()
self.latent_size = latent_size
self.hidden_size = hidden_size
self.dropout = dropout
self.latent = torch.nn.Linear(self.hidden_size, self.latent_size * 2)
@staticmethod
def reparameterize(mu, logvar, training=True):
if training:
std = logvar.mul(0.5).exp_()
eps = std.data.new(std.size()).normal_()
return eps.mul(std).add_(mu)
return mu
def forward(self, x, training=True):
z_variables = self.latent(x)
mu, logvar = torch.chunk(z_variables, 2, dim=1)
z = self.reparameterize(mu, logvar, training=training)
return z, mu, logvar
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'latent_size': 4, 'dropout': 0.5}]
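# Explanatory sketch (not from the source): reparameterize draws
# z = mu + eps * exp(0.5 * logvar) with eps ~ N(0, I), i.e. z ~ N(mu,
# exp(logvar)) expressed so that gradients flow through mu and logvar.
def _check_reparameterize():
    mu = torch.zeros(4, 4)
    logvar = torch.zeros(4, 4)
    z = DisentangledAELatent.reparameterize(mu, logvar, training=True)
    assert z.shape == mu.shape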
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (64 + x0 + 128 * x1), xmask)
tmp6 = tl.load(in_ptr1 + (x0 + 128 * x1), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp0 * tmp4
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 2, 4, 8), (64, 32, 8, 1), torch.float32)
buf2 = torch.ops.aten.normal_functional.default(buf1)
buf3 = buf2
del buf2
buf4 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_exp_mul_0[grid(256)](buf3, buf0, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf4, reinterpret_tensor(buf0, (4, 2, 4, 8), (128, 32, 8, 1), 0
), reinterpret_tensor(buf0, (4, 2, 4, 8), (128, 32, 8, 1), 64
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf0, (4, 2, 4, 8), (128, 32, 8, 1), 64), buf3
class DisentangledAELatentNew(torch.nn.Module):
"""Dense Dientangled Latent Layer between encoder and decoder"""
def __init__(self, hidden_size: 'int', latent_size: 'int', dropout: 'float'
):
super(DisentangledAELatentNew, self).__init__()
self.latent_size = latent_size
self.hidden_size = hidden_size
self.dropout = dropout
self.latent = torch.nn.Linear(self.hidden_size, self.latent_size * 2)
@staticmethod
def reparameterize(mu, logvar, training=True):
if training:
std = logvar.mul(0.5).exp_()
eps = std.data.new(std.size()).normal_()
return eps.mul(std).add_(mu)
return mu
def forward(self, input_0):
primals_1 = self.latent.weight
primals_2 = self.latent.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1], output[2]
| Saran-nns/traja | DisentangledAELatent | false | 1,019 | [
"MIT"
] | 0 | f2256cc47abd33377b3a87f110f4c8da1cf6765f | https://github.com/Saran-nns/traja/tree/f2256cc47abd33377b3a87f110f4c8da1cf6765f |
PSNR | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/np/cnp5peaudws3oiaugbrawibehsddo2ovc64uhbacz4txujiu7hni.py
# Topologically Sorted Source Nodes: [mse, add, log10, mul], Original ATen: [aten.mse_loss, aten.add, aten.log10, aten.mul]
# Source node to ATen node mapping:
# add => add
# log10 => log10
# mse => mean, pow_1, sub
# mul => mul
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-12), kwargs = {})
# %log10 : [num_users=1] = call_function[target=torch.ops.aten.log10.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log10, -10), kwargs = {})
triton_per_fused_add_log10_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_log10_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log10_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1e-12
tmp10 = tmp8 + tmp9
tmp11 = libdevice.log10(tmp10)
tmp12 = -10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mse, add, log10, mul], Original ATen: [aten.mse_loss, aten.add, aten.log10, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_log10_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1e-12
tmp10 = tmp8 + tmp9
tmp11 = libdevice.log10(tmp10)
tmp12 = -10.0
tmp13 = tmp11 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
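# A single program reduces all 256 elements: mean squared error of the two
# inputs, then PSNR = -10 * log10(mse + 1e-12), where the 1e-12 epsilon
# guards log10(0) when the inputs are identical. Eager sketch:
# mse = torch.mean((a - b) ** 2); psnr = -10 * torch.log10(mse + 1e-12)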
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log10_mse_loss_mul_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class PSNRNew(th.nn.Module):
def __init__(self):
super(PSNRNew, self).__init__()
self.mse = th.nn.MSELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| zsinsense/demosaicnet | PSNR | false | 13,177 | [
"MIT"
] | 0 | bbe8151cab86dbe46b76806cf9ec353994b389ff | https://github.com/zsinsense/demosaicnet/tree/bbe8151cab86dbe46b76806cf9ec353994b389ff |
SymKlCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/wv/cwvti54lsojjpkh6f73xsvv55wjtno2rrmtjgaznlsjst37yn74a.py
# Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax_1 => amax_2, sub_4
# softmax => amax_1, exp_1, sub_2
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {})
# %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_2), kwargs = {})
triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr1 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7m/c7m62lqyfgv73n2cb7qnzr2xodmurvv7yy6z2m7nhu5m3pfdjjxe.py
# Topologically Sorted Source Nodes: [log_softmax, softmax_1], Original ATen: [aten._log_softmax, aten._softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# softmax_1 => amax_3, exp_3, sub_6
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_6,), kwargs = {})
triton_poi_fused__log_softmax__softmax_1 = async_compile.triton('triton_poi_fused__log_softmax__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/t4/ct4y23pxxoeimylfghb3zgxs2sgexranitqqpowxqihbyqj4rh4q.py
# Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax, softmax_1, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.mean, aten.add]
# Source node to ATen node mapping:
# kl_div => eq, full_default, full_default_1, isnan, log_1, mean, mul, mul_1, sub_3, where, where_1
# kl_div_1 => eq_1, full_default_2, full_default_3, isnan_1, log_3, mean_1, mul_2, mul_3, sub_7, where_2, where_3
# log_softmax => exp, log, sub_1, sum_1
# log_softmax_1 => exp_2, log_2, sub_5, sum_3
# loss => add
# loss_1 => mul_4
# softmax => div, sum_2
# softmax_1 => div_1, sum_4
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {})
# %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %log_1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [-1], True), kwargs = {})
# %div_1 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_4), kwargs = {})
# %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_1,), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_1, 0), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %log_3), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %mul_3), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan_1, %full_default_3, %where_2), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [-1], True), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, %log_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sub_5), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_3, %mul_2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_7,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {})
triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2 = async_compile.triton('triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp68 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp18 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp37 = tl.load(in_ptr2 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.load(in_ptr2 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp42 = tl.load(in_ptr2 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp51 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp52 = tl.load(in_ptr3 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp54 = tl.load(in_ptr3 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.load(in_ptr3 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp60 = tl.load(in_ptr3 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float("nan")
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = _tmp34 + tmp33
_tmp34 = tl.where(rmask, tmp35, _tmp34)
tmp39 = tmp37 + tmp38
tmp41 = tmp39 + tmp40
tmp43 = tmp41 + tmp42
tmp44 = tmp36 / tmp43
tmp45 = libdevice.isnan(tmp44).to(tl.int1)
tmp46 = tmp44 == tmp10
tmp47 = tl_math.log(tmp44)
tmp48 = tmp44 * tmp47
tmp49 = tl.where(tmp46, tmp10, tmp48)
tmp50 = tl.where(tmp45, tmp15, tmp49)
tmp53 = tl_math.exp(tmp52)
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp53 + tmp55
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp59 + tmp61
tmp63 = tl_math.log(tmp62)
tmp64 = tmp51 - tmp63
tmp65 = tmp44 * tmp64
tmp66 = tmp50 - tmp65
tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK])
tmp69 = _tmp68 + tmp67
_tmp68 = tl.where(rmask, tmp69, _tmp68)
tmp34 = tl.sum(_tmp34, 1)[:, None]
tmp68 = tl.sum(_tmp68, 1)[:, None]
tmp70 = 256.0
tmp71 = tmp34 / tmp70
tmp72 = tmp68 / tmp70
tmp73 = tmp71 + tmp72
tmp74 = 1.0
tmp75 = tmp73 * tmp74
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp75, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, log_softmax_1], Original ATen: [aten._softmax, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0.run(arg1_1, buf0, buf6, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax, softmax_1], Original ATen: [aten._log_softmax, aten._softmax]
triton_poi_fused__log_softmax__softmax_1.run(arg0_1, buf2, buf4, 256, grid=grid(256), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf8 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax, kl_div, log_softmax, softmax_1, kl_div_1, log_softmax_1, loss, loss_1], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.mean, aten.add]
triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2.run(buf8, buf0, buf2, buf4, buf6, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf2
del buf4
del buf6
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.loss import _Loss
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
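# Rows are length 4 (the trailing dim): subtracting the row max stabilizes
# the exponentials, and the kernel emits both exp(x - max) (the softmax
# numerator) and x - max (the log-softmax shift). The kernel below is the
# same fusion with the two output roles swapped for the other operand.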
@triton.jit
def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
@triton.jit
def triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2(in_out_ptr0
, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp68 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp37 = tl.load(in_ptr2 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tl.load(in_ptr2 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp40 = tl.load(in_ptr2 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp42 = tl.load(in_ptr2 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp51 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp52 = tl.load(in_ptr3 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp54 = tl.load(in_ptr3 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp57 = tl.load(in_ptr3 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp60 = tl.load(in_ptr3 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = _tmp34 + tmp33
_tmp34 = tl.where(rmask, tmp35, _tmp34)
tmp39 = tmp37 + tmp38
tmp41 = tmp39 + tmp40
tmp43 = tmp41 + tmp42
tmp44 = tmp36 / tmp43
tmp45 = libdevice.isnan(tmp44).to(tl.int1)
tmp46 = tmp44 == tmp10
tmp47 = tl_math.log(tmp44)
tmp48 = tmp44 * tmp47
tmp49 = tl.where(tmp46, tmp10, tmp48)
tmp50 = tl.where(tmp45, tmp15, tmp49)
tmp53 = tl_math.exp(tmp52)
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp53 + tmp55
tmp58 = tl_math.exp(tmp57)
tmp59 = tmp56 + tmp58
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp59 + tmp61
tmp63 = tl_math.log(tmp62)
tmp64 = tmp51 - tmp63
tmp65 = tmp44 * tmp64
tmp66 = tmp50 - tmp65
tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK])
tmp69 = _tmp68 + tmp67
_tmp68 = tl.where(rmask, tmp69, _tmp68)
tmp34 = tl.sum(_tmp34, 1)[:, None]
tmp68 = tl.sum(_tmp68, 1)[:, None]
tmp70 = 256.0
tmp71 = tmp34 / tmp70
tmp72 = tmp68 / tmp70
tmp73 = tmp71 + tmp72
tmp74 = 1.0
tmp75 = tmp73 * tmp74
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp75, None)
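# The reduction accumulates both divergence directions and divides by all
# 256 elements, matching (as an eager sketch, assuming
# `import torch.nn.functional as F` and the default reduction='mean'):
# loss = (F.kl_div(F.log_softmax(x, -1), F.softmax(y, -1))
#         + F.kl_div(F.log_softmax(y, -1), F.softmax(x, -1)))  # alpha = 1.0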
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg1_1, buf0,
buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_1[grid(256)](arg0_1, buf2,
buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf8 = buf3
del buf3
triton_red_fused__log_softmax__softmax_add_mean_mul_sub_xlogy_2[grid(1)
](buf8, buf0, buf2, buf4, buf6, 1, 256, XBLOCK=1, RBLOCK=256,
num_warps=8, num_stages=1)
del buf0
del buf2
del buf4
del buf6
return buf8,
class Criterion(_Loss):
def __init__(self, alpha=1.0, name='criterion'):
        """Alpha is used to weight each loss term."""
        super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight
"""
return
class SymKlCriterionNew(Criterion):
def __init__(self, alpha=1.0, name='KL Div Criterion'):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| mahartmann/mt-dnn | SymKlCriterion | false | 10,479 | [
"MIT"
] | 0 | c9aa3379dc255fd8fc40f24b6cd508f6a645b32f | https://github.com/mahartmann/mt-dnn/tree/c9aa3379dc255fd8fc40f24b6cd508f6a645b32f |
RQLoss | from torch.nn import Module
import torch
from typing import cast
from torch.nn.modules import Module
import torch.nn.functional as F
class RQLoss(Module):
"""The RQ (backwards) loss between class probabilities and predictions.
This loss is defined in `'Resolving label uncertainty with implicit generative
models' <https://openreview.net/forum?id=AEa_UepnMDX>`_.
.. versionadded:: 0.2
"""
def forward(self, probs: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""Computes the RQ (backwards) loss on prior.
Args:
probs: probabilities of predictions, expected shape B x C x H x W
target: prior probabilities, expected shape B x C x H x W
Returns:
qr loss
"""
q = probs
z = q / q.norm(p=1, dim=(0, 2, 3), keepdim=True).clamp_min(1e-12
).expand_as(q)
r = F.normalize(z * target, p=1, dim=1)
loss = torch.einsum('bcxy,bcxy->bxy', r, torch.log(r) - torch.log(q)
).mean()
return cast(torch.Tensor, loss)
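# The einsum contracts only the channel dim c, so the loss is the mean over
# (b, x, y) of sum_c r * (log r - log q): the per-pixel KL divergence
# KL(r || q) between the renormalized target-weighted distribution r and
# the normalized prediction q.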
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torch.nn.modules import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr1 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask)
tmp27 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp28 = tl.load(in_ptr1 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask)
tmp3 = 1e-12
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 / tmp4
tmp7 = tmp5 * tmp6
tmp8 = tl_math.abs(tmp7)
tmp12 = triton_helpers.maximum(tmp11, tmp3)
tmp13 = tmp9 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl_math.abs(tmp15)
tmp17 = tmp8 + tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp3)
tmp22 = tmp18 / tmp21
tmp24 = tmp22 * tmp23
tmp25 = tl_math.abs(tmp24)
tmp26 = tmp17 + tmp25
tmp30 = triton_helpers.maximum(tmp29, tmp3)
tmp31 = tmp27 / tmp30
tmp33 = tmp31 * tmp32
tmp34 = tl_math.abs(tmp33)
tmp35 = tmp26 + tmp34
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x2 + 16 * y3), xmask & ymask)
tmp7 = tl.load(in_ptr3 + (x2 + 16 * y1), xmask & ymask, eviction_policy
='evict_last')
tmp2 = 1e-12
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 / tmp3
tmp6 = tmp4 * tmp5
tmp8 = triton_helpers.maximum(tmp7, tmp2)
tmp9 = tmp6 / tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tl_math.log(tmp0)
tmp12 = tmp10 - tmp11
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp9, xmask & ymask)
tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp12, xmask & ymask)
@triton.jit
def triton_per_fused_mean_3(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0[grid(4)](arg0_1, buf0, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused_div_linalg_vector_norm_mul_1[grid(64)](arg0_1,
buf0, arg1_1, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 16)](arg0_1, buf0, arg1_1, buf1,
buf2, buf3, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1
)
del arg0_1
del arg1_1
del buf0
buf4 = reinterpret_tensor(buf1, (64, 1, 1), (1, 1, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (64, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5
del buf5
triton_per_fused_mean_3[grid(1)](buf6, buf4, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
return buf6,
class RQLossNew(Module):
"""The RQ (backwards) loss between class probabilities and predictions.
This loss is defined in `'Resolving label uncertainty with implicit generative
models' <https://openreview.net/forum?id=AEa_UepnMDX>`_.
.. versionadded:: 0.2
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
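# Hypothetical call pattern (CUDA assumed by the compiled path); both
# tensors are B x C x H x W as in get_inputs() above:
# probs = torch.rand(4, 4, 4, 4, device='cuda')
# target = torch.rand(4, 4, 4, 4, device='cuda')
# loss = RQLossNew()(probs, target)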
| LaudateCorpus1/torchgeo | RQLoss | false | 2,496 | [
"MIT"
] | 0 | 747a9352b9663e7d0e0c90a8b53533f0bb06c9b3 | https://github.com/LaudateCorpus1/torchgeo/tree/747a9352b9663e7d0e0c90a8b53533f0bb06c9b3 |
Gaussian | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/fa/cfajjzqhflrbkf33ugoaupdyb35hpbhli75wfcrl4pqthm7oahjv.py
# Topologically Sorted Source Nodes: [exp, add, log, sigma_t, sigma_t_1], Original ATen: [aten.exp, aten.add, aten.log, aten.squeeze]
# Source node to ATen node mapping:
# add => add
# exp => exp
# log => log
# sigma_t => add_1
# sigma_t_1 => squeeze
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%addmm,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, 1e-06), kwargs = {})
# %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.dim](args = (%add_1, 0), kwargs = {})
triton_poi_fused_add_exp_log_squeeze_0 = async_compile.triton('triton_poi_fused_add_exp_log_squeeze_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_log_squeeze_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_log_squeeze_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, add, log, sigma_t, sigma_t_1], Original ATen: [aten.exp, aten.add, aten.log, aten.squeeze]
stream0 = get_raw_stream(0)
triton_poi_fused_add_exp_log_squeeze_0.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return (buf2, buf1, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_exp_log_squeeze_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
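# log(1 + exp(x)) is softplus, so the kernel maps raw logits to
# softplus(x) + 1e-6, keeping the predicted standard deviation strictly
# positive. Eager sketch (assuming `import torch.nn.functional as F`):
# sigma = F.softplus(logits) + 1e-6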
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_log_squeeze_0[grid(16)](buf0, buf1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return buf2, buf1, primals_1, buf0
class GaussianNew(nn.Module):
def __init__(self, hidden_size, output_size):
"""
        Gaussian likelihood; supports continuous data.
        Args:
            hidden_size (int): hidden state h_{i,t} column size
output_size (int): embedding size
"""
super(GaussianNew, self).__init__()
self.mu_layer = nn.Linear(hidden_size, output_size)
self.sigma_layer = nn.Linear(hidden_size, output_size)
def forward(self, input_0):
primals_1 = self.mu_layer.weight
primals_3 = self.mu_layer.bias
primals_2 = self.sigma_layer.weight
primals_5 = self.sigma_layer.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| ashfarhangi/COVID-19_Impact | Gaussian | false | 9,755 | [
"Apache-2.0"
] | 0 | 7ce46616278cac95e31b3e853bb28ea7b8e58b7e | https://github.com/ashfarhangi/COVID-19_Impact/tree/7ce46616278cac95e31b3e853bb28ea7b8e58b7e |
RestrictionLoss | import torch
import torch.nn as nn
class RestrictionLoss(nn.Module):
def __init__(self, otherbar=0):
super().__init__()
self.otherbar = otherbar
def forward(self, predict):
loss = torch.sum(((self.otherbar - predict) * (1 - predict)) ** 2)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_pow_rsub_sum_0(in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 0.0
tmp2 = tmp1 - tmp0
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = tmp2 * tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
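# tmp1 = 0.0 is the default otherbar=0 specialized in at trace time, so this
# kernel computes sum(((0 - p) * (1 - p)) ** 2) over all 256 elements; a
# different otherbar would need the module re-traced and recompiled.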
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_pow_rsub_sum_0[grid(1)](arg0_1, buf0, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
return buf0,
class RestrictionLossNew(nn.Module):
def __init__(self, otherbar=0):
super().__init__()
self.otherbar = otherbar
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Polarbeartnt/SP-ILC | RestrictionLoss | false | 5,717 | [
"MIT"
] | 1 | 07c812dfe40461409c9714936190ba1470f91fc3 | https://github.com/Polarbeartnt/SP-ILC/tree/07c812dfe40461409c9714936190ba1470f91fc3 |
SPPNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ze/czeyd3qjsq546c7ea763ybzbn4sb4zzidmbxe2coosrykwwb4pit.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 288
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (147*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
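# A hedged sketch of triton_poi_fused_0, assuming in_ptr0 is a contiguous
# (96, 3, 7, 7) conv weight: the store index y0 + 3*x2 + 147*y1 (y0 = input
# channel, x2 = one of 49 spatial taps, y1 = output channel) repacks it so the
# input channel becomes the fastest-moving axis, i.e. a channels-last layout.
def _reference_repack_weight_0(w):
    return w.permute(0, 2, 3, 1).contiguous()  # (out, in, h, w) -> (out, h, w, in)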
# kernel path: runs/run_shard_9/inductor_cache/t6/ct67un24eqnkienhtq5g277aekg3shjgzyui43ixko3gkjr374lc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 65536], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 65536
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (65536*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (196608*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/xw/cxwwggnjdqdlfi4iyh6h2l72fhw7drpg36zxhvcby5wityk4xytl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 24576
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 96
y1 = (yindex // 96)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (96*x2) + (2400*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/sy/csyjnf5unkh5q6qz5t5b5c3jbhntegigtgotmwgfth6nqamth6uk.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 98304
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/xy/cxykqywqmmx4lrmyr3xkhylqdliejlnthpdo3owql5dyasl6adf3.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 147456
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = (yindex // 384)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (384*x2) + (3456*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/zp/czp4p3m56a2sa5regcqiwtob3pi3xbg5a4o6jaslaemdw6wu6m2h.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 98304
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = (yindex // 384)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (384*x2) + (3456*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/7q/c7qhslvblcwzpdl623xubzde7e42yq7vmj3ptvbjxhobsaw6ae6o.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6000000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
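# triton_poi_fused_convolution_6 is the bias-add epilogue that inductor splits
# off from the extern conv kernel: x0 = xindex % 96 picks the channel, so over
# the 6,000,000 outputs this is an in-place broadcast add of a 96-element bias.
# A hedged eager equivalent, assuming a channels-last (N, H, W, 96) conv result:
def _reference_bias_add_6(conv_out, bias):
    return conv_out + bias  # bias broadcasts over the trailing channel axis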
# kernel path: runs/run_shard_9/inductor_cache/b6/cb6q26ovch4nf6mjvpzolppnxz7snz6l6egegpdqtimrfkcujj5p.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_1 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%view, [0, 0, 0, 0, 2, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_7 = async_compile.triton('triton_poi_fused_constant_pad_nd_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 128], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 62500
xnumel = 99
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 15625
y1 = (yindex // 15625)
tmp0 = (-2) + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-2) + x2 + (96*y3)), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tmp8 * tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp5, tmp9, tmp10)
tl.store(out_ptr0 + (y0 + (15648*x2) + (1549152*y1)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/74/c745cth3douh3txivm47zyg4w4blpsrq647udxfmitp6plgzatch.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool3d]
# Source node to ATen node mapping:
# x_1 => avg_pool3d
# Graph fragment:
# %avg_pool3d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool3d.default](args = (%constant_pad_nd, [4, 1, 1], [1, 1, 1]), kwargs = {})
triton_poi_fused_avg_pool3d_8 = async_compile.triton('triton_poi_fused_avg_pool3d_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool3d_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool3d_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6000000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 15625
x1 = (xindex // 15625) % 96
x2 = (xindex // 1500000)
x3 = (xindex // 15625)
tmp0 = tl.load(in_ptr0 + (x0 + (15648*x1) + (1549152*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (15648 + x0 + (15648*x1) + (1549152*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (31296 + x0 + (15648*x1) + (1549152*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (46944 + x0 + (15648*x1) + (1549152*x2)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0 + (15648*x3)), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/55/c55rzydqdf3yoqqey3vjjj77jcubepjozcci6f734vsigp4ftm6o.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.mul, aten.add, aten.pow, aten.div]
# Source node to ATen node mapping:
# x => relu
# x_1 => add, div, mul_1, pow_1
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 0.0001), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %pow_1), kwargs = {})
triton_poi_fused_add_div_mul_pow_relu_9 = async_compile.triton('triton_poi_fused_add_div_mul_pow_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 128], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_relu_9(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 62500
xnumel = 96
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 15625
y1 = (yindex // 15625)
tmp0 = tl.load(in_ptr0 + (x2 + (96*y3)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + (15648*x2) + (1502208*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = 0.0001
tmp5 = tmp3 * tmp4
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = 0.75
tmp9 = libdevice.pow(tmp7, tmp8)
tmp10 = tmp2 / tmp9
tl.store(out_ptr0 + (x2 + (96*y3)), tmp10, xmask & ymask)
''', device_str='cuda')
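# Kernels 7-9 are the lowering of relu + local response normalization: kernel 7
# pads relu(x)**2 along channels by [2, 1], kernel 8 averages a size-4 channel
# window, and this kernel divides relu(x) by (1 + 0.0001 * avg) ** 0.75.
# A hedged eager equivalent, assuming size=4, alpha=0.0001, beta=0.75, k=1.0
# as the constants in the kernel suggest:
def _reference_relu_lrn(x):
    import torch.nn.functional as F
    return F.local_response_norm(F.relu(x), size=4, alpha=0.0001, beta=0.75, k=1.0)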
# kernel path: runs/run_shard_9/inductor_cache/7z/c7zjh5im7qeahjdg5kd3yev2wj5q2chz67bxvvvuufglejopbjkc.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1476096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 96
x1 = (xindex // 96) % 62
x2 = (xindex // 5952) % 62
x3 = (xindex // 369024)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (192*x1) + (24000*x2) + (1500000*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (96 + x0 + (192*x1) + (24000*x2) + (1500000*x3)), xmask)
tmp3 = tl.load(in_ptr0 + (192 + x0 + (192*x1) + (24000*x2) + (1500000*x3)), xmask)
tmp5 = tl.load(in_ptr0 + (12000 + x0 + (192*x1) + (24000*x2) + (1500000*x3)), xmask)
tmp7 = tl.load(in_ptr0 + (12096 + x0 + (192*x1) + (24000*x2) + (1500000*x3)), xmask)
tmp9 = tl.load(in_ptr0 + (12192 + x0 + (192*x1) + (24000*x2) + (1500000*x3)), xmask)
tmp11 = tl.load(in_ptr0 + (24000 + x0 + (192*x1) + (24000*x2) + (1500000*x3)), xmask)
tmp13 = tl.load(in_ptr0 + (24096 + x0 + (192*x1) + (24000*x2) + (1500000*x3)), xmask)
tmp15 = tl.load(in_ptr0 + (24192 + x0 + (192*x1) + (24000*x2) + (1500000*x3)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x4), tmp16, xmask)
tl.store(out_ptr1 + (x4), tmp41, xmask)
''', device_str='cuda')
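# triton_poi_fused_max_pool2d_with_indices_10 is a 3x3, stride-2 max pool over
# the channels-last LRN output; the int8 side output records which of the nine
# window taps won (offsets 0-8) for use in the backward pass. A hedged eager
# equivalent, assuming a (N, 125, 125, 96) channels-last input:
def _reference_maxpool_10(x):
    import torch.nn.functional as F
    return F.max_pool2d(x.permute(0, 3, 1, 2), kernel_size=3, stride=2,
                        return_indices=True)  # flat indices, not 0-8 offsets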
# kernel path: runs/run_shard_9/inductor_cache/3n/c3nf6mv2ihi6pcxofp44ziubmbtqc4tvnn6cqy76ajfpnqhhfnf6.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_11 = async_compile.triton('triton_poi_fused_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 861184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/5i/c5id6bqsc6dt4ukh7e2mqccykb3lgt37jbp2d2wyrumbj7lf4ojv.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_4 => constant_pad_nd_1
# Graph fragment:
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%view_2, [0, 0, 0, 0, 2, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_12 = async_compile.triton('triton_poi_fused_constant_pad_nd_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 512], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 3364
xnumel = 259
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 841
y1 = (yindex // 841)
tmp0 = (-2) + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-2) + x2 + (256*y3)), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tmp8 * tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp5, tmp9, tmp10)
tl.store(out_ptr0 + (y0 + (841*x2) + (217824*y1)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/ih/cihayq6ysww65x2t46joafeiikifgoegjfsn3rc6ipkio4gcgspy.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.avg_pool3d]
# Source node to ATen node mapping:
# x_4 => avg_pool3d_1
# Graph fragment:
# %avg_pool3d_1 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool3d.default](args = (%constant_pad_nd_1, [4, 1, 1], [1, 1, 1]), kwargs = {})
triton_poi_fused_avg_pool3d_13 = async_compile.triton('triton_poi_fused_avg_pool3d_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool3d_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool3d_13(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 861184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 215296
x1 = (xindex // 215296)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (217824*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (841 + x0 + (217824*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (1682 + x0 + (217824*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (2523 + x0 + (217824*x1)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/4w/c4wbqjb2byzie5jmglossk2c6jp23ihfhclj3mvminpag7drulme.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.relu, aten.mul, aten.add, aten.pow, aten.div]
# Source node to ATen node mapping:
# x_3 => relu_1
# x_4 => add_1, div_1, mul_3, pow_2
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 0.0001), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1.0), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.75), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu_1, %pow_2), kwargs = {})
triton_poi_fused_add_div_mul_pow_relu_14 = async_compile.triton('triton_poi_fused_add_div_mul_pow_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_relu_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_relu_14(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 3364
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 841
y1 = (yindex // 841)
tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + (841*x2) + (215296*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = 0.0001
tmp5 = tmp3 * tmp4
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = 0.75
tmp9 = libdevice.pow(tmp7, tmp8)
tmp10 = tmp2 / tmp9
tl.store(out_ptr0 + (x2 + (256*y3)), tmp10, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/go/cgo4z5atfxjiqowmnopbccpjefbqjuyuczcwrsl3dstgce5wwaei.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_15 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 200704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 14
x2 = (xindex // 3584) % 14
x3 = (xindex // 50176)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (14848*x2) + (215296*x3)), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (14848*x2) + (215296*x3)), None)
tmp3 = tl.load(in_ptr0 + (512 + x0 + (512*x1) + (14848*x2) + (215296*x3)), None)
tmp5 = tl.load(in_ptr0 + (7424 + x0 + (512*x1) + (14848*x2) + (215296*x3)), None)
tmp7 = tl.load(in_ptr0 + (7680 + x0 + (512*x1) + (14848*x2) + (215296*x3)), None)
tmp9 = tl.load(in_ptr0 + (7936 + x0 + (512*x1) + (14848*x2) + (215296*x3)), None)
tmp11 = tl.load(in_ptr0 + (14848 + x0 + (512*x1) + (14848*x2) + (215296*x3)), None)
tmp13 = tl.load(in_ptr0 + (15104 + x0 + (512*x1) + (14848*x2) + (215296*x3)), None)
tmp15 = tl.load(in_ptr0 + (15360 + x0 + (512*x1) + (14848*x2) + (215296*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x4), tmp16, None)
tl.store(out_ptr1 + (x4), tmp41, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/f2/cf2l22hpl6lqmacio3r7dy5m5ndmrzyofuepyclqryzjanxzlyhd.py
# Topologically Sorted Source Nodes: [conv2d_2, x_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_6 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 221184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
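# triton_poi_fused_convolution_relu_16 fuses the bias add and the ReLU into a
# single elementwise pass over the extern conv output (tmp2..tmp4 above).
# A hedged eager equivalent, assuming a channels-last result and 384-wide bias:
def _reference_bias_relu_16(conv_out, bias):
    return torch.relu(conv_out + bias)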
# kernel path: runs/run_shard_9/inductor_cache/r2/cr2ifajkdlxrhpblpdoiq4lecdzt6hhqls4lpkvvqmeutj5eizv7.py
# Topologically Sorted Source Nodes: [conv2d_3, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_7 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_17 = async_compile.triton('triton_poi_fused_convolution_relu_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 153600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/vs/cvss3q75dh6mvw6htoe7j5xp74uadvzm7rpianqkwazsebohb7x2.py
# Topologically Sorted Source Nodes: [conv2d_4, x_8], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_8 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_18 = async_compile.triton('triton_poi_fused_convolution_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/7v/c7vtgjcwimcvbehsp4vujvlas4yar6ykioq66mhfwk366yydb74q.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_9 => getitem_5
# Graph fragment:
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_19 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 4
x2 = (xindex // 1024)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (4096*x2)), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (4096*x2)), None)
tmp7 = tl.load(in_ptr0 + (2048 + x0 + (512*x1) + (4096*x2)), None)
tmp12 = tl.load(in_ptr0 + (2304 + x0 + (512*x1) + (4096*x2)), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
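# Kernels 19 and 20 are two levels of the SPP head over the same 8x8 relu_4
# map: 2x2/stride-2 pooling gives a 4x4 grid and 4x4/stride-4 gives a 2x2 grid
# (kernel 19 stores only the backward indices; its pooled values come from a
# separate getitem). A hedged sketch of the pyramid, assuming a channels-last
# (N, 8, 8, 256) input and that the levels are flattened and concatenated
# downstream (the listing is truncated before that point):
def _reference_spp_levels(x):
    import torch.nn.functional as F
    nchw = x.permute(0, 3, 1, 2)
    levels = [F.max_pool2d(nchw, k, k).flatten(1) for k in (2, 4)]
    return torch.cat(levels, dim=1)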
# kernel path: runs/run_shard_9/inductor_cache/ri/crig7z52ga2kh4pneyfztam4fwsb4zxc3foh6qo6httvlsjj6xeu.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_10 => _low_memory_max_pool2d_with_offsets_3, getitem_7
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_4, [4, 4], [4, 4], [0, 0], [1, 1], False), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_20 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 2
y4 = (yindex // 2)
y2 = (yindex // 4)
y5 = yindex % 4
y6 = yindex
tmp0 = tl.load(in_ptr0 + (x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (512 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (768 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2048 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2304 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2560 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2816 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (4096 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (4352 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (4608 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (4864 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (6144 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (6400 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (6656 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (6912 + x3 + (1024*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 > tmp0
tmp32 = tl.full([1, 1], 1, tl.int8)
tmp33 = tl.full([1, 1], 0, tl.int8)
tmp34 = tl.where(tmp31, tmp32, tmp33)
tmp35 = tmp3 > tmp2
tmp36 = tl.full([1, 1], 2, tl.int8)
tmp37 = tl.where(tmp35, tmp36, tmp34)
tmp38 = tmp5 > tmp4
tmp39 = tl.full([1, 1], 3, tl.int8)
tmp40 = tl.where(tmp38, tmp39, tmp37)
tmp41 = tmp7 > tmp6
tmp42 = tl.full([1, 1], 4, tl.int8)
tmp43 = tl.where(tmp41, tmp42, tmp40)
tmp44 = tmp9 > tmp8
tmp45 = tl.full([1, 1], 5, tl.int8)
tmp46 = tl.where(tmp44, tmp45, tmp43)
tmp47 = tmp11 > tmp10
tmp48 = tl.full([1, 1], 6, tl.int8)
tmp49 = tl.where(tmp47, tmp48, tmp46)
tmp50 = tmp13 > tmp12
tmp51 = tl.full([1, 1], 7, tl.int8)
tmp52 = tl.where(tmp50, tmp51, tmp49)
tmp53 = tmp15 > tmp14
tmp54 = tl.full([1, 1], 8, tl.int8)
tmp55 = tl.where(tmp53, tmp54, tmp52)
tmp56 = tmp17 > tmp16
tmp57 = tl.full([1, 1], 9, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp19 > tmp18
tmp60 = tl.full([1, 1], 10, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp21 > tmp20
tmp63 = tl.full([1, 1], 11, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp23 > tmp22
tmp66 = tl.full([1, 1], 12, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp25 > tmp24
tmp69 = tl.full([1, 1], 13, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp27 > tmp26
tmp72 = tl.full([1, 1], 14, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp29 > tmp28
tmp75 = tl.full([1, 1], 15, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (y5 + (4*x3) + (1024*y2)), tmp30, xmask & ymask)
tl.store(out_ptr1 + (x3 + (256*y6)), tmp76, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/g4/cg4hxzqg24baeuxl4ml4jqtqnsbsrcxxobbynypxrv35m4wsq2ez.py
# Topologically Sorted Source Nodes: [spp_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# spp_2 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %view_6], 1), kwargs = {})
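# Width of the concatenated result is 5376 per sample (arithmetic note, added
# for clarity): 4x4 level -> 16*256 = 4096, 2x2 level -> 4*256 = 1024,
# 1x1 level -> 1*256 = 256, and 4096 + 1024 + 256 = 5376, matching fc1's input.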
triton_poi_fused_cat_21 = async_compile.triton('triton_poi_fused_cat_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 21504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5376
x1 = (xindex // 5376)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 5120, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 4096, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.load(in_ptr0 + ((512*(x0 % 4)) + (4096*((x0 // 4) % 4)) + (16384*x1) + ((x0 // 16) % 256)), tmp7 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (256 + (512*(x0 % 4)) + (4096*((x0 // 4) % 4)) + (16384*x1) + ((x0 // 16) % 256)), tmp7 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = tl.load(in_ptr0 + (2048 + (512*(x0 % 4)) + (4096*((x0 // 4) % 4)) + (16384*x1) + ((x0 // 16) % 256)), tmp7 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = tl.load(in_ptr0 + (2304 + (512*(x0 % 4)) + (4096*((x0 // 4) % 4)) + (16384*x1) + ((x0 // 16) % 256)), tmp7 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp7, tmp14, tmp15)
tmp17 = tmp0 >= tmp5
tmp18 = tmp17 & tmp4
tmp19 = tl.load(in_ptr1 + ((1024*x1) + ((-4096) + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp6, tmp16, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp4, tmp20, tmp21)
tmp23 = tmp0 >= tmp3
tmp24 = tl.full([1], 5376, tl.int64)
tmp25 = tmp0 < tmp24
tmp26 = tl.load(in_ptr2 + ((256*x1) + ((-5120) + x0)), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tl.where(tmp4, tmp22, tmp26)
tl.store(out_ptr0 + (x2), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/wq/cwqifm67pplkh6yo5ydw6rquqxl65xzqjhxz7siv7ipjvhuvidym.py
# Topologically Sorted Source Nodes: [fc1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# fc1 => relu_5
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_13), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
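# Note (added for clarity): the kernel below fuses only the fc1 bias add and the
# ReLU; the matmul itself runs separately through extern_kernels.mm in call().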
triton_poi_fused_relu_22 = async_compile.triton('triton_poi_fused_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_22', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (96, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (96, ), (1, ))
assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
assert_size_stride(primals_4, (256, 96, 5, 5), (2400, 25, 5, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (384, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (384, ), (1, ))
assert_size_stride(primals_8, (384, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_9, (384, ), (1, ))
assert_size_stride(primals_10, (256, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (4096, 5376), (5376, 1))
assert_size_stride(primals_13, (4096, ), (1, ))
assert_size_stride(primals_14, (4096, 4096), (4096, 1))
assert_size_stride(primals_15, (4096, ), (1, ))
assert_size_stride(primals_16, (102, 4096), (4096, 1))
assert_size_stride(primals_17, (102, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((96, 3, 7, 7), (147, 1, 21, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 288, 49, grid=grid(288, 49), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 256, 256), (196608, 1, 768, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 65536, grid=grid(12, 65536), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((256, 96, 5, 5), (2400, 1, 480, 96), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 24576, 25, grid=grid(24576, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((384, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 98304, 9, grid=grid(98304, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((384, 384, 3, 3), (3456, 1, 1152, 384), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 147456, 9, grid=grid(147456, 9), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((256, 384, 3, 3), (3456, 1, 1152, 384), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_10, buf5, 98304, 9, grid=grid(98304, 9), stream=stream0)
del primals_10
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 96, 125, 125), (1500000, 1, 12000, 96))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf7, primals_2, 6000000, grid=grid(6000000), stream=stream0)
del primals_2
buf8 = empty_strided_cuda((4, 1, 99, 125, 125), (1549152, 1549152, 15648, 125, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_7.run(buf7, buf8, 62500, 99, grid=grid(62500, 99), stream=stream0)
buf9 = empty_strided_cuda((4, 1, 96, 125, 125), (1502208, 1502208, 15648, 125, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool3d]
triton_poi_fused_avg_pool3d_8.run(buf8, buf9, 6000000, grid=grid(6000000), stream=stream0)
buf10 = empty_strided_cuda((4, 96, 125, 125), (1500000, 1, 12000, 96), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.mul, aten.add, aten.pow, aten.div]
triton_poi_fused_add_div_mul_pow_relu_9.run(buf7, buf9, buf10, 62500, 96, grid=grid(62500, 96), stream=stream0)
buf11 = empty_strided_cuda((4, 96, 62, 62), (369024, 1, 5952, 96), torch.float32)
buf12 = empty_strided_cuda((4, 96, 62, 62), (369024, 1, 5952, 96), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf10, buf11, buf12, 1476096, grid=grid(1476096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 256, 29, 29), (215296, 1, 7424, 256))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_11.run(buf14, primals_5, 861184, grid=grid(861184), stream=stream0)
del primals_5
buf15 = empty_strided_cuda((4, 1, 259, 29, 29), (217824, 217824, 841, 29, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_12.run(buf14, buf15, 3364, 259, grid=grid(3364, 259), stream=stream0)
buf16 = empty_strided_cuda((4, 1, 256, 29, 29), (215296, 215296, 841, 29, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.avg_pool3d]
triton_poi_fused_avg_pool3d_13.run(buf15, buf16, 861184, grid=grid(861184), stream=stream0)
buf17 = empty_strided_cuda((4, 256, 29, 29), (215296, 1, 7424, 256), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.relu, aten.mul, aten.add, aten.pow, aten.div]
triton_poi_fused_add_div_mul_pow_relu_14.run(buf14, buf16, buf17, 3364, 256, grid=grid(3364, 256), stream=stream0)
buf18 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256), torch.float32)
buf19 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_15.run(buf17, buf18, buf19, 200704, grid=grid(200704), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 384, 12, 12), (55296, 1, 4608, 384))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf21, primals_7, 221184, grid=grid(221184), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 384, 10, 10), (38400, 1, 3840, 384))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_17.run(buf23, primals_9, 153600, grid=grid(153600), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, x_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_18.run(buf25, primals_11, 65536, grid=grid(65536), stream=stream0)
del primals_11
buf26 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.int8)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_19.run(buf25, buf26, 16384, grid=grid(16384), stream=stream0)
buf27 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.float32)
buf28 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.int8)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_20.run(buf25, buf27, buf28, 16, 256, grid=grid(16, 256), stream=stream0)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.max_pool2d_with_indices]
buf29 = torch.ops.aten.max_pool2d_with_indices.default(buf25, [8, 8], [8, 8])
buf30 = buf29[0]
buf31 = buf29[1]
del buf29
buf32 = empty_strided_cuda((4, 5376), (5376, 1), torch.float32)
# Topologically Sorted Source Nodes: [spp_2], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf25, buf27, buf30, buf32, 21504, grid=grid(21504), stream=stream0)
del buf27
del buf30
buf33 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf32, reinterpret_tensor(primals_12, (5376, 4096), (1, 5376), 0), out=buf33)
buf34 = buf33; del buf33 # reuse
# Topologically Sorted Source Nodes: [fc1], Original ATen: [aten.relu]
triton_poi_fused_relu_22.run(buf34, primals_13, 16384, grid=grid(16384), stream=stream0)
del primals_13
buf35 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf34, reinterpret_tensor(primals_14, (4096, 4096), (1, 4096), 0), out=buf35)
buf36 = buf35; del buf35 # reuse
# Topologically Sorted Source Nodes: [fc2], Original ATen: [aten.relu]
triton_poi_fused_relu_22.run(buf36, primals_15, 16384, grid=grid(16384), stream=stream0)
del primals_15
buf37 = empty_strided_cuda((4, 102), (102, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_17, buf36, reinterpret_tensor(primals_16, (4096, 102), (1, 4096), 0), alpha=1, beta=1, out=buf37)
del primals_17
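    # buf37 holds the logits; the trailing tensors are retained for the backward
    # pass (an interpretive note, not generated output).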
return (buf37, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf8, buf9, buf10, buf11, buf12, buf14, buf15, buf16, buf17, buf18, buf19, buf21, buf23, buf25, buf26, buf28, buf31, buf32, buf34, buf36, primals_16, primals_14, primals_12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((96, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 256, 256), (196608, 65536, 256, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 96, 5, 5), (2400, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((384, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((384, 384, 3, 3), (3456, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 384, 3, 3), (3456, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4096, 5376), (5376, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4096, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((102, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((102, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 288
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 65536 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 196608 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 96
y1 = yindex // 96
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 96 * x2 + 2400 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = yindex // 384
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 384 * x2 + 3456 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = yindex // 384
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 384 * x2 + 3456 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 6000000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 62500
xnumel = 99
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 15625
y1 = yindex // 15625
tmp0 = -2 + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 96, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-2 + x2 + 96 * y3), tmp5 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tmp8 * tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp5, tmp9, tmp10)
tl.store(out_ptr0 + (y0 + 15648 * x2 + 1549152 * y1), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_avg_pool3d_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 6000000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 15625
x1 = xindex // 15625 % 96
x2 = xindex // 1500000
x3 = xindex // 15625
tmp0 = tl.load(in_ptr0 + (x0 + 15648 * x1 + 1549152 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (15648 + x0 + 15648 * x1 + 1549152 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (31296 + x0 + 15648 * x1 + 1549152 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (46944 + x0 + 15648 * x1 + 1549152 * x2), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0 + 15648 * x3), tmp8, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_relu_9(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 62500
xnumel = 96
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 15625
y1 = yindex // 15625
tmp0 = tl.load(in_ptr0 + (x2 + 96 * y3), xmask & ymask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + 15648 * x2 + 1502208 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = 0.0001
tmp5 = tmp3 * tmp4
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = 0.75
tmp9 = libdevice.pow(tmp7, tmp8)
tmp10 = tmp2 / tmp9
tl.store(out_ptr0 + (x2 + 96 * y3), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1476096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 96
x1 = xindex // 96 % 62
x2 = xindex // 5952 % 62
x3 = xindex // 369024
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 192 * x1 + 24000 * x2 + 1500000 * x3), xmask
)
tmp1 = tl.load(in_ptr0 + (96 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp3 = tl.load(in_ptr0 + (192 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp5 = tl.load(in_ptr0 + (12000 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp7 = tl.load(in_ptr0 + (12096 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp9 = tl.load(in_ptr0 + (12192 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp11 = tl.load(in_ptr0 + (24000 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp13 = tl.load(in_ptr0 + (24096 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp15 = tl.load(in_ptr0 + (24192 + x0 + 192 * x1 + 24000 * x2 + 1500000 *
x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp41, xmask)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 861184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_12(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 3364
xnumel = 259
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 841
y1 = yindex // 841
tmp0 = -2 + x2
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-2 + x2 + 256 * y3), tmp5 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tmp8 * tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp5, tmp9, tmp10)
tl.store(out_ptr0 + (y0 + 841 * x2 + 217824 * y1), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_avg_pool3d_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 861184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 215296
x1 = xindex // 215296
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 217824 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (841 + x0 + 217824 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (1682 + x0 + 217824 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (2523 + x0 + 217824 * x1), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_relu_14(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 3364
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 841
y1 = yindex // 841
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + 841 * x2 + 215296 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = 0.0001
tmp5 = tmp3 * tmp4
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = 0.75
tmp9 = libdevice.pow(tmp7, tmp8)
tmp10 = tmp2 / tmp9
tl.store(out_ptr0 + (x2 + 256 * y3), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 14
x2 = xindex // 3584 % 14
x3 = xindex // 50176
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 14848 * x2 + 215296 * x3), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp5 = tl.load(in_ptr0 + (7424 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp7 = tl.load(in_ptr0 + (7680 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp9 = tl.load(in_ptr0 + (7936 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp11 = tl.load(in_ptr0 + (14848 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp13 = tl.load(in_ptr0 + (15104 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp15 = tl.load(in_ptr0 + (15360 + x0 + 512 * x1 + 14848 * x2 + 215296 *
x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, None)
tl.store(out_ptr1 + x4, tmp41, None)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 4
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 4096 * x2), None)
tmp7 = tl.load(in_ptr0 + (2048 + x0 + 512 * x1 + 4096 * x2), None)
tmp12 = tl.load(in_ptr0 + (2304 + x0 + 512 * x1 + 4096 * x2), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 2
y4 = yindex // 2
y2 = yindex // 4
y5 = yindex % 4
y6 = yindex
tmp0 = tl.load(in_ptr0 + (x3 + 1024 * y0 + 8192 * y4), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (512 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (768 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2048 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2304 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2560 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2816 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (4096 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (4352 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (4608 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (4864 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (6144 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (6400 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (6656 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (6912 + x3 + 1024 * y0 + 8192 * y4), xmask &
ymask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 > tmp0
tmp32 = tl.full([1, 1], 1, tl.int8)
tmp33 = tl.full([1, 1], 0, tl.int8)
tmp34 = tl.where(tmp31, tmp32, tmp33)
tmp35 = tmp3 > tmp2
tmp36 = tl.full([1, 1], 2, tl.int8)
tmp37 = tl.where(tmp35, tmp36, tmp34)
tmp38 = tmp5 > tmp4
tmp39 = tl.full([1, 1], 3, tl.int8)
tmp40 = tl.where(tmp38, tmp39, tmp37)
tmp41 = tmp7 > tmp6
tmp42 = tl.full([1, 1], 4, tl.int8)
tmp43 = tl.where(tmp41, tmp42, tmp40)
tmp44 = tmp9 > tmp8
tmp45 = tl.full([1, 1], 5, tl.int8)
tmp46 = tl.where(tmp44, tmp45, tmp43)
tmp47 = tmp11 > tmp10
tmp48 = tl.full([1, 1], 6, tl.int8)
tmp49 = tl.where(tmp47, tmp48, tmp46)
tmp50 = tmp13 > tmp12
tmp51 = tl.full([1, 1], 7, tl.int8)
tmp52 = tl.where(tmp50, tmp51, tmp49)
tmp53 = tmp15 > tmp14
tmp54 = tl.full([1, 1], 8, tl.int8)
tmp55 = tl.where(tmp53, tmp54, tmp52)
tmp56 = tmp17 > tmp16
tmp57 = tl.full([1, 1], 9, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp19 > tmp18
tmp60 = tl.full([1, 1], 10, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp21 > tmp20
tmp63 = tl.full([1, 1], 11, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp23 > tmp22
tmp66 = tl.full([1, 1], 12, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp25 > tmp24
tmp69 = tl.full([1, 1], 13, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp27 > tmp26
tmp72 = tl.full([1, 1], 14, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp29 > tmp28
tmp75 = tl.full([1, 1], 15, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (y5 + 4 * x3 + 1024 * y2), tmp30, xmask & ymask)
tl.store(out_ptr1 + (x3 + 256 * y6), tmp76, xmask & ymask)
@triton.jit
def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 21504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5376
x1 = xindex // 5376
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 5120, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 4096, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.load(in_ptr0 + (512 * (x0 % 4) + 4096 * (x0 // 4 % 4) + 16384 *
x1 + x0 // 16 % 256), tmp7 & xmask, eviction_policy='evict_last',
other=0.0)
tmp9 = tl.load(in_ptr0 + (256 + 512 * (x0 % 4) + 4096 * (x0 // 4 % 4) +
16384 * x1 + x0 // 16 % 256), tmp7 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = tl.load(in_ptr0 + (2048 + 512 * (x0 % 4) + 4096 * (x0 // 4 % 4) +
16384 * x1 + x0 // 16 % 256), tmp7 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = tl.load(in_ptr0 + (2304 + 512 * (x0 % 4) + 4096 * (x0 // 4 % 4) +
16384 * x1 + x0 // 16 % 256), tmp7 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp7, tmp14, tmp15)
tmp17 = tmp0 >= tmp5
tmp18 = tmp17 & tmp4
tmp19 = tl.load(in_ptr1 + (1024 * x1 + (-4096 + x0)), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp6, tmp16, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp4, tmp20, tmp21)
tmp23 = tmp0 >= tmp3
tl.full([1], 5376, tl.int64)
tmp26 = tl.load(in_ptr2 + (256 * x1 + (-5120 + x0)), tmp23 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.where(tmp4, tmp22, tmp26)
tl.store(out_ptr0 + x2, tmp27, xmask)
@triton.jit
def triton_poi_fused_relu_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (96, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (96,), (1,))
assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
assert_size_stride(primals_4, (256, 96, 5, 5), (2400, 25, 5, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (384, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (384,), (1,))
assert_size_stride(primals_8, (384, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_9, (384,), (1,))
assert_size_stride(primals_10, (256, 384, 3, 3), (3456, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (4096, 5376), (5376, 1))
assert_size_stride(primals_13, (4096,), (1,))
assert_size_stride(primals_14, (4096, 4096), (4096, 1))
assert_size_stride(primals_15, (4096,), (1,))
assert_size_stride(primals_16, (102, 4096), (4096, 1))
assert_size_stride(primals_17, (102,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((96, 3, 7, 7), (147, 1, 21, 3), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(288, 49)](primals_1, buf0, 288, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 256, 256), (196608, 1, 768, 3),
torch.float32)
triton_poi_fused_1[grid(12, 65536)](primals_3, buf1, 12, 65536,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((256, 96, 5, 5), (2400, 1, 480, 96),
torch.float32)
triton_poi_fused_2[grid(24576, 25)](primals_4, buf2, 24576, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((384, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_3[grid(98304, 9)](primals_6, buf3, 98304, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((384, 384, 3, 3), (3456, 1, 1152, 384),
torch.float32)
triton_poi_fused_4[grid(147456, 9)](primals_8, buf4, 147456, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((256, 384, 3, 3), (3456, 1, 1152, 384),
torch.float32)
triton_poi_fused_5[grid(98304, 9)](primals_10, buf5, 98304, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf6 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 96, 125, 125), (1500000, 1, 12000, 96))
buf7 = buf6
del buf6
triton_poi_fused_convolution_6[grid(6000000)](buf7, primals_2,
6000000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf8 = empty_strided_cuda((4, 1, 99, 125, 125), (1549152, 1549152,
15648, 125, 1), torch.float32)
triton_poi_fused_constant_pad_nd_7[grid(62500, 99)](buf7, buf8,
62500, 99, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 1, 96, 125, 125), (1502208, 1502208,
15648, 125, 1), torch.float32)
triton_poi_fused_avg_pool3d_8[grid(6000000)](buf8, buf9, 6000000,
XBLOCK=512, num_warps=8, num_stages=1)
buf10 = empty_strided_cuda((4, 96, 125, 125), (1500000, 1, 12000,
96), torch.float32)
triton_poi_fused_add_div_mul_pow_relu_9[grid(62500, 96)](buf7, buf9,
buf10, 62500, 96, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 96, 62, 62), (369024, 1, 5952, 96),
torch.float32)
buf12 = empty_strided_cuda((4, 96, 62, 62), (369024, 1, 5952, 96),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(1476096)](buf10,
buf11, buf12, 1476096, XBLOCK=512, num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 256, 29, 29), (215296, 1, 7424, 256))
buf14 = buf13
del buf13
triton_poi_fused_convolution_11[grid(861184)](buf14, primals_5,
861184, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf15 = empty_strided_cuda((4, 1, 259, 29, 29), (217824, 217824,
841, 29, 1), torch.float32)
triton_poi_fused_constant_pad_nd_12[grid(3364, 259)](buf14, buf15,
3364, 259, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
buf16 = empty_strided_cuda((4, 1, 256, 29, 29), (215296, 215296,
841, 29, 1), torch.float32)
triton_poi_fused_avg_pool3d_13[grid(861184)](buf15, buf16, 861184,
XBLOCK=512, num_warps=8, num_stages=1)
buf17 = empty_strided_cuda((4, 256, 29, 29), (215296, 1, 7424, 256),
torch.float32)
triton_poi_fused_add_div_mul_pow_relu_14[grid(3364, 256)](buf14,
buf16, buf17, 3364, 256, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
buf18 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256),
torch.float32)
buf19 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_15[grid(200704)](buf17,
buf18, buf19, 200704, XBLOCK=512, num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf18, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 384, 12, 12), (55296, 1, 4608, 384))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_16[grid(221184)](buf21, primals_7,
221184, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 384, 10, 10), (38400, 1, 3840, 384))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_17[grid(153600)](buf23, primals_9,
153600, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf24 = extern_kernels.convolution(buf23, buf5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_18[grid(65536)](buf25, primals_11,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_11
buf26 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_19[grid(16384)](buf25,
buf26, 16384, XBLOCK=128, num_warps=4, num_stages=1)
buf27 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.
float32)
buf28 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_20[grid(16, 256)](buf25,
buf27, buf28, 16, 256, XBLOCK=256, YBLOCK=1, num_warps=4,
num_stages=1)
buf29 = torch.ops.aten.max_pool2d_with_indices.default(buf25, [8, 8
], [8, 8])
buf30 = buf29[0]
buf31 = buf29[1]
del buf29
buf32 = empty_strided_cuda((4, 5376), (5376, 1), torch.float32)
triton_poi_fused_cat_21[grid(21504)](buf25, buf27, buf30, buf32,
21504, XBLOCK=256, num_warps=4, num_stages=1)
del buf27
del buf30
buf33 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf32, reinterpret_tensor(primals_12, (5376, 4096
), (1, 5376), 0), out=buf33)
buf34 = buf33
del buf33
triton_poi_fused_relu_22[grid(16384)](buf34, primals_13, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf35 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32)
extern_kernels.mm(buf34, reinterpret_tensor(primals_14, (4096, 4096
), (1, 4096), 0), out=buf35)
buf36 = buf35
del buf35
triton_poi_fused_relu_22[grid(16384)](buf36, primals_15, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf37 = empty_strided_cuda((4, 102), (102, 1), torch.float32)
extern_kernels.addmm(primals_17, buf36, reinterpret_tensor(
primals_16, (4096, 102), (1, 4096), 0), alpha=1, beta=1, out=buf37)
del primals_17
return (buf37, buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf8, buf9,
buf10, buf11, buf12, buf14, buf15, buf16, buf17, buf18, buf19,
buf21, buf23, buf25, buf26, buf28, buf31, buf32, buf34, buf36,
primals_16, primals_14, primals_12)
def spatial_pyramid_pool(previous_conv, num_sample, previous_conv_size,
out_pool_size):
"""
previous_conv: a tensor vector of previous convolution layer
num_sample: an int number of image in the batch
previous_conv_size: an int vector [height, width] of the matrix features size of previous convolution layer
out_pool_size: a int vector of expected output size of max pooling layer
returns: a tensor vector with shape [1 x n] is the concentration of multi-level pooling
"""
for i in range(len(out_pool_size)):
h, w = previous_conv_size
h_wid = math.ceil(h / out_pool_size[i])
w_wid = math.ceil(w / out_pool_size[i])
h_str = math.floor(h / out_pool_size[i])
w_str = math.floor(w / out_pool_size[i])
max_pool = nn.MaxPool2d(kernel_size=(h_wid, w_wid), stride=(h_str,
w_str))
x = max_pool(previous_conv)
if i == 0:
spp = x.view(num_sample, -1)
else:
spp = torch.cat((spp, x.view(num_sample, -1)), 1)
return spp
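

# A minimal sanity check (commented out; illustrative only, not from the original
# repo) showing the point of SPP: feature maps of different spatial sizes pool
# down to vectors of identical length. The channel count 256 matches conv5 below,
# and both spatial sizes were checked against the window/stride arithmetic above.
#
#   conv_a = torch.randn(1, 256, 8, 8)
#   conv_b = torch.randn(1, 256, 13, 13)
#   vec_a = spatial_pyramid_pool(conv_a, 1, [8, 8], [4, 2, 1])
#   vec_b = spatial_pyramid_pool(conv_b, 1, [13, 13], [4, 2, 1])
#   assert vec_a.shape == vec_b.shape == (1, (16 + 4 + 1) * 256)  # (1, 5376)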
class SPPNetNew(nn.Module):
"""
    A CNN model which adds an SPP layer so that inputs of varying spatial size map to a fixed-length feature vector
"""
def __init__(self, n_classes=102, init_weights=True):
super(SPPNetNew, self).__init__()
"""
'wc1',[3,96,11,11]
'wc2',[96,256,5,5]
'wc3',[256,384,3,3]
'wc4':[384,384,3,3]
'wc5':[384,256,3,3]
'fc6':[spatial_pool_dim*256,4096]
'fc7':[4096,4096]
'out',[4096,n_classes])
"""
self.output_num = [4, 2, 1]
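        # pyramid levels: 4x4, 2x2 and 1x1 pooling grids over the conv5 feature map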
self.conv1 = nn.Conv2d(3, 96, kernel_size=7, stride=2)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv2 = nn.Conv2d(96, 256, kernel_size=5, stride=2)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(256, 384, kernel_size=3)
self.conv4 = nn.Conv2d(384, 384, kernel_size=3)
self.conv5 = nn.Conv2d(384, 256, kernel_size=3)
self.fc1 = nn.Linear(sum([(i * i) for i in self.output_num]) * 256,
4096)
self.fc2 = nn.Linear(4096, 4096)
self.out = nn.Linear(4096, n_classes)
if init_weights:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.fc1.weight
primals_13 = self.fc1.bias
primals_14 = self.fc2.weight
primals_15 = self.fc2.bias
primals_16 = self.out.weight
primals_17 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
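# Minimal usage sketch (illustrative, not part of the generated module). The
# compiled `call` asserts a conv5 activation of (4, 256, 8, 8); a batch of
# four 3x255x255 images is one input size that reproduces it, though the
# exact size the graph was traced with is an assumption here:
#
#   model = SPPNetNew(n_classes=102).cuda()
#   x = torch.rand(4, 3, 255, 255, device='cuda')
#   logits = model(x)  # shape (4, 102)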
| maj34/Deep-Learning-Papers | SPPNet | false | 13,081 | [
"MIT"
] | 0 | 2672d3426b3f4342f7d81cd5ae029f2485594b4c | https://github.com/maj34/Deep-Learning-Papers/tree/2672d3426b3f4342f7d81cd5ae029f2485594b4c |
NormalNoiseGenerator | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/wc/cwc5ci3x64sywutc7imfem4lyme6vua6kqapyj2w55wbh2kjcctq.py
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
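# Eager-mode reference for the kernel above (a sketch, not generated code):
# sigma=1.0 and mu=0 were constant-folded at trace time, so the fused
# mul/add is an in-place `x * 1.0 + 0.0` over the freshly drawn noise,
# i.e. the whole module reduces to
#
#   noise = torch.randn_like(x) * 1.0 + 0  # == torch.randn_like(x)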
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [randn_like], Original ATen: [aten.randn_like]
buf0 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(buf2, 256, grid=grid(256), stream=stream0)
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.distributions
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf2,
class AdversarialNoiseGenerator(torch.nn.Module):
def __init__(self):
super().__init__()
return
def forward(self, x):
raise NotImplementedError()
class NormalNoiseGeneratorNew(AdversarialNoiseGenerator):
def __init__(self, sigma=1.0, mu=0):
super().__init__()
self.sigma = sigma
self.mu = mu
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
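# Minimal usage sketch (illustrative): the module only reads the shape of its
# input and returns same-shaped standard-normal noise. sigma and mu were
# traced as constants, so other values would require re-tracing:
#
#   gen = NormalNoiseGeneratorNew(sigma=1.0, mu=0).cuda()
#   noise = gen(torch.empty(4, 4, 4, 4, device='cuda'))  # ~ N(0, 1)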
| AlexMeinke/Provable-OOD-Detection | NormalNoiseGenerator | false | 7,692 | [
"MIT"
] | 21 | 9a132aec994ff718c96b81885736ab866df60d87 | https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87 |
GateAddNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/pt/cpt3kou2mafw4tfe5tp2oixeb4z2kkf6adwejs6l7mux2qvryb27.py
# Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.glu, aten.add]
# Source node to ATen node mapping:
# add => add
# x_1 => glu
# Graph fragment:
# %glu : [num_users=1] = call_function[target=torch.ops.aten.glu.default](args = (%view_1,), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%glu, %primals_4), kwargs = {})
triton_poi_fused_add_glu_0 = async_compile.triton('triton_poi_fused_add_glu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_glu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_glu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6n/c6nwltytpo33ssumvxlcryrpvlql2hsjrmxl624j4dkkjxt5qgkm.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output => add_1, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/mn/cmntyljhuirhsdjg2yosgzllpkpxqedxgoyk6gunquq2rf3kl7u5.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output => add_1, add_2, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {})
triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.glu, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_glu_0.run(buf0, primals_4, buf1, 256, grid=grid(256), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 256, grid=grid(256), stream=stream0)
del buf2
del buf3
del primals_6
return (buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_glu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp4 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
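# Eager-mode reference for the two layer-norm kernels (a sketch, not
# generated code): kernel 1 emits the per-row mean and rsqrt(var + eps) over
# the last dim of size 4; kernel 2 applies the normalization and the affine
# parameters. Together they compute
#
#   mean = x.mean(-1, keepdim=True)
#   var = x.var(-1, unbiased=False, keepdim=True)
#   out = (x - mean) * torch.rsqrt(var + 1e-05) * weight + bias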
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_glu_0[grid(256)](buf0, primals_4, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3,
primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
del buf3
del primals_6
return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf1
class TimeDistributedInterpolation(nn.Module):
def __init__(self, output_size: 'int', batch_first: 'bool'=False,
trainable: 'bool'=False):
super().__init__()
self.output_size = output_size
self.batch_first = batch_first
self.trainable = trainable
if self.trainable:
self.mask = nn.Parameter(torch.zeros(self.output_size, dtype=
torch.float32))
self.gate = nn.Sigmoid()
def interpolate(self, x):
upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode=
'linear', align_corners=True).squeeze(1)
if self.trainable:
upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0
return upsampled
def forward(self, x):
if len(x.size()) <= 2:
return self.interpolate(x)
x_reshape = x.contiguous().view(-1, x.size(-1))
y = self.interpolate(x_reshape)
if self.batch_first:
y = y.contiguous().view(x.size(0), -1, y.size(-1))
else:
y = y.view(-1, x.size(1), y.size(-1))
return y
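# Reference sketch (illustrative sizes): interpolate() resizes the last
# feature dimension with 1-D linear interpolation, e.g. hidden size 3 -> 4:
#
#   tdi = TimeDistributedInterpolation(output_size=4, batch_first=True)
#   y = tdi(torch.rand(2, 5, 3))  # shape (2, 5, 4)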
class GatedLinearUnit(nn.Module):
"""Gated Linear Unit"""
def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout:
'float'=None):
super().__init__()
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = dropout
self.hidden_size = hidden_size or input_size
self.fc = nn.Linear(input_size, self.hidden_size * 2)
self.init_weights()
def init_weights(self):
for n, p in self.named_parameters():
if 'bias' in n:
torch.nn.init.zeros_(p)
elif 'fc' in n:
torch.nn.init.xavier_uniform_(p)
def forward(self, x):
if self.dropout is not None:
x = self.dropout(x)
x = self.fc(x)
x = F.glu(x, dim=-1)
return x
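# Reference sketch (not repo code): F.glu splits the doubled projection in
# half along the last dim and gates one half with the sigmoid of the other,
# which is exactly what triton_poi_fused_add_glu_0 fuses with the skip add:
#
#   a, b = fc(x).chunk(2, dim=-1)
#   glu_out = a * torch.sigmoid(b)  # == F.glu(fc(x), dim=-1)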
class AddNorm(nn.Module):
def __init__(self, input_size: 'int', skip_size: 'int'=None,
trainable_add: 'bool'=True):
super().__init__()
self.input_size = input_size
self.trainable_add = trainable_add
self.skip_size = skip_size or input_size
if self.input_size != self.skip_size:
self.resample = TimeDistributedInterpolation(self.input_size,
batch_first=True, trainable=False)
if self.trainable_add:
self.mask = nn.Parameter(torch.zeros(self.input_size, dtype=
torch.float))
self.gate = nn.Sigmoid()
self.norm = nn.LayerNorm(self.input_size)
def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'):
if self.input_size != self.skip_size:
skip = self.resample(skip)
if self.trainable_add:
skip = skip * self.gate(self.mask) * 2.0
output = self.norm(x + skip)
return output
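# Design note with a small check (illustrative, not repo code): with
# trainable_add=True the skip is scaled by sigmoid(mask) * 2.0, so the
# zero-initialized mask starts the gate at exactly 1.0, i.e. an identity
# skip connection:
#
#   mask = torch.zeros(4)
#   assert torch.allclose(torch.sigmoid(mask) * 2.0, torch.ones(4))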
class GateAddNormNew(nn.Module):
def __init__(self, input_size: 'int', hidden_size: 'int'=None,
skip_size: 'int'=None, trainable_add: 'bool'=False, dropout:
'float'=None):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size or input_size
self.skip_size = skip_size or self.hidden_size
self.dropout = dropout
self.glu = GatedLinearUnit(self.input_size, hidden_size=self.
hidden_size, dropout=self.dropout)
self.add_norm = AddNorm(self.hidden_size, skip_size=self.skip_size,
trainable_add=trainable_add)
def forward(self, input_0, input_1):
primals_1 = self.glu.fc.weight
primals_2 = self.glu.fc.bias
primals_5 = self.add_norm.norm.weight
primals_6 = self.add_norm.norm.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
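# Minimal usage sketch (shapes mirror the assert_size_stride guards in
# `call`; the CUDA placement is required by the compiled path):
#
#   m = GateAddNormNew(input_size=4).cuda()
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   skip = torch.rand(4, 4, 4, 4, device='cuda')
#   out = m(x, skip)  # LayerNorm(GLU(Linear(x)) + skip), shape (4, 4, 4, 4)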
| JustinNeumann/pytorch-forecasting | GateAddNorm | false | 696 | [
"MIT"
] | 0 | 4f6e449cb3788b856e66c4283398a5db201aa6ff | https://github.com/JustinNeumann/pytorch-forecasting/tree/4f6e449cb3788b856e66c4283398a5db201aa6ff |
BertOutAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
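# Note (inferred from the traced shapes, not stated in the source): the
# `* 1.0` above is the attention scale 1 / sqrt(attention_head_size), which
# is exactly 1 here because the traced head size is 1.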
# kernel path: runs/run_shard_6/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_8, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del buf9
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
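# Eager-mode reference for the two softmax kernels (a sketch, not generated
# code): the first computes exp(scores - rowmax); this one normalizes by the
# row sum and zeroes any row whose scores are all -inf (a fully masked row),
# which plain softmax would turn into NaNs:
#
#   fully_masked = (scores == float('-inf')).all(-1, keepdim=True)
#   probs = torch.softmax(scores, dim=-1).masked_fill(fully_masked, 0.0)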
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf9
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class BertOutAttentionNew(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
if ctx_dim is None:
ctx_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(ctx_dim, self.all_head_size)
self.value = nn.Linear(ctx_dim, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0, input_1):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_7 = self.value.weight
primals_8 = self.value.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
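# Minimal smoke test for the compiled attention module above (assumes a CUDA
# device and a config object exposing the attributes read in __init__; the
# values mirror the traced shapes, i.e. hidden_size 4 split across 4 heads):
import torch
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.0)
    attn = BertOutAttentionNew(cfg).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    out = attn(x, x)  # (4, 4, 4): x attends over itself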
| ashutoshbaghel/tgifqa-lxmert | BertOutAttention | false | 1,510 | ["MIT"] | 0 | 7969f478d20fbfbba1c0eaaf0b96891654bfcc26 | https://github.com/ashutoshbaghel/tgifqa-lxmert/tree/7969f478d20fbfbba1c0eaaf0b96891654bfcc26 |
ValueNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf9, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf8, 256, grid=grid(256), stream=stream0)
del primals_7
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_9
return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0), primals_8, buf8, primals_6, buf9, primals_4, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
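# The kernel above fuses the linear layer's bias add with ReLU and also emits
# the boolean (activation <= 0) mask that aten.threshold_backward later uses
# to zero gradients. Eager sketch of the same pair of outputs (illustrative):
import torch
def relu_with_backward_mask(x, bias):
    y = torch.relu(x + bias)
    return y, y <= 0  # True where the ReLU gradient will be zeroed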
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5,
primals_7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf7)
del primals_9
return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0
), primals_8, buf8, primals_6, buf9, primals_4, buf10
class ValueNetworkNew(nn.Module):
def __init__(self, state_dim, hidden_dim, init_w=0.003):
super(ValueNetworkNew, self).__init__()
self.linear1 = nn.Linear(state_dim, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, hidden_dim)
self.linear4 = nn.Linear(hidden_dim, 1)
self.linear4.weight.data.uniform_(-init_w, init_w)
self.linear4.bias.data.uniform_(-init_w, init_w)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_8 = self.linear4.weight
primals_9 = self.linear4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
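# Minimal usage sketch for the compiled value network above (assumes CUDA;
# shapes match the 4x4x4x4 trace used by the shape asserts):
import torch
if __name__ == '__main__':
    net = ValueNetworkNew(state_dim=4, hidden_dim=4).cuda()
    state = torch.rand(4, 4, 4, 4, device='cuda')
    value = net(state)  # -> (4, 4, 4, 1)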
| JieRen98/Popular-RL-Algorithms | ValueNetwork | false | 13,908 | ["Apache-2.0"] | 273 | 7f2bb74a51cf9cbde92a6ccfa42e97dc129dd145 | https://github.com/JieRen98/Popular-RL-Algorithms/tree/7f2bb74a51cf9cbde92a6ccfa42e97dc129dd145 |
AlignQuestionEmbedding | import torch
import torch.nn as nn
import torch.nn.functional as F
class AlignQuestionEmbedding(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.linear = nn.Linear(input_dim, input_dim)
self.relu = nn.ReLU()
def forward(self, context, question, question_mask):
ctx_ = self.linear(context)
ctx_ = self.relu(ctx_)
qtn_ = self.linear(question)
qtn_ = self.relu(qtn_)
qtn_transpose = qtn_.permute(0, 2, 1)
align_scores = torch.bmm(ctx_, qtn_transpose)
qtn_mask = question_mask.unsqueeze(1).expand(align_scores.size())
align_scores = align_scores.masked_fill(qtn_mask == 1, -float('inf'))
align_scores_flat = align_scores.view(-1, question.size(1))
alpha = F.softmax(align_scores_flat, dim=1)
alpha = alpha.view(-1, context.shape[1], question.shape[1])
align_embedding = torch.bmm(alpha, question)
return align_embedding
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
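# Quick eager-mode sanity check for the module above (assumes CUDA so the
# outputs can later be compared against the compiled variant; the seed and the
# shape assert are illustrative):
import torch
if __name__ == '__main__':
    torch.manual_seed(0)
    ref = AlignQuestionEmbedding(input_dim=4).cuda()
    ctx, qtn, mask = [t.cuda() for t in get_inputs()]
    out = ref(ctx, qtn, mask)
    assert out.shape == (4, 4, 4)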
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_out_ptr1,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = 0.0
tmp9 = tmp7 <= tmp8
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(in_out_ptr1 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * (x0 // 4), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * (x0 // 4)), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 == tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 == tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr1 + x0, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4)), xmask)
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
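# Kernels _1 and _2 above are a two-pass masked softmax: the first pass
# computes each row's max and sum of exponentials with masked positions set to
# -inf, the second pass normalizes. Eager sketch of the same stable computation:
import torch
def masked_softmax_sketch(scores, mask):
    s = scores.masked_fill(mask == 1, float('-inf'))
    s = s - s.max(dim=-1, keepdim=True).values  # subtract row max for stability
    e = s.exp()
    return e / e.sum(dim=-1, keepdim=True)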
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf2)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf3,
primals_2, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf1, reinterpret_tensor(buf3, (4, 4, 4), (16, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
buf6 = empty_strided_cuda((16, 1), (1, 16), torch.float32)
triton_poi_fused__softmax_1[grid(16)](primals_5, buf4, buf5, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](primals_5, buf4, buf5, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
del buf6
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1),
0), primals_4, out=buf8)
del buf7
return buf8, primals_4, primals_5, reinterpret_tensor(primals_3, (16, 4
), (4, 1), 0), buf1, buf4, buf3, buf9
class AlignQuestionEmbeddingNew(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.linear = nn.Linear(input_dim, input_dim)
self.relu = nn.ReLU()
def forward(self, input_0, input_1, input_2):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| HuyTu7/dl_optimizers | AlignQuestionEmbedding | false | 9,137 | ["MIT"] | 0 | 245242718324cebcabe657bdbc704aa54ad0b8d2 | https://github.com/HuyTu7/dl_optimizers/tree/245242718324cebcabe657bdbc704aa54ad0b8d2 |
Quantizing | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dh/cdh7thxgq76cywbiys7o3xrowoojcupcla2vg4oph4hiyptk7cct.py
# Topologically Sorted Source Nodes: [delta, mul, dist, q_idx], Original ATen: [aten.sub, aten.mul, aten.sum, aten.argmin]
# Source node to ATen node mapping:
# delta => sub
# dist => sum_1
# mul => mul
# q_idx => argmin
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %argmin : [num_users=2] = call_function[target=torch.ops.aten.argmin.default](args = (%sum_1, -1), kwargs = {})
triton_poi_fused_argmin_mul_sub_sum_0 = async_compile.triton('triton_poi_fused_argmin_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmin_mul_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_argmin_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3))
tmp18 = tl.broadcast_to(tmp17, [XBLOCK])
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (4))
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp27 = tl.load(in_ptr0 + (5))
tmp28 = tl.broadcast_to(tmp27, [XBLOCK])
tmp32 = tl.load(in_ptr0 + (6))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (7))
tmp38 = tl.broadcast_to(tmp37, [XBLOCK])
tmp57 = tl.load(in_ptr0 + (8))
tmp58 = tl.broadcast_to(tmp57, [XBLOCK])
tmp61 = tl.load(in_ptr0 + (9))
tmp62 = tl.broadcast_to(tmp61, [XBLOCK])
tmp66 = tl.load(in_ptr0 + (10))
tmp67 = tl.broadcast_to(tmp66, [XBLOCK])
tmp71 = tl.load(in_ptr0 + (11))
tmp72 = tl.broadcast_to(tmp71, [XBLOCK])
tmp90 = tl.load(in_ptr0 + (12))
tmp91 = tl.broadcast_to(tmp90, [XBLOCK])
tmp94 = tl.load(in_ptr0 + (13))
tmp95 = tl.broadcast_to(tmp94, [XBLOCK])
tmp99 = tl.load(in_ptr0 + (14))
tmp100 = tl.broadcast_to(tmp99, [XBLOCK])
tmp104 = tl.load(in_ptr0 + (15))
tmp105 = tl.broadcast_to(tmp104, [XBLOCK])
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp6 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp25 = tmp24 - tmp2
tmp26 = tmp25 * tmp25
tmp29 = tmp28 - tmp7
tmp30 = tmp29 * tmp29
tmp31 = tmp26 + tmp30
tmp34 = tmp33 - tmp13
tmp35 = tmp34 * tmp34
tmp36 = tmp31 + tmp35
tmp39 = tmp38 - tmp19
tmp40 = tmp39 * tmp39
tmp41 = tmp36 + tmp40
tmp42 = tmp22 < tmp41
tmp43 = tmp22 == tmp41
tmp44 = tmp22 != tmp22
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 0, tl.int64)
tmp51 = tl.full([1], 1, tl.int64)
tmp52 = tmp50 < tmp51
tmp53 = tmp49 & tmp52
tmp54 = tmp47 | tmp53
tmp55 = tl.where(tmp54, tmp22, tmp41)
tmp56 = tl.where(tmp54, tmp50, tmp51)
tmp59 = tmp58 - tmp2
tmp60 = tmp59 * tmp59
tmp63 = tmp62 - tmp7
tmp64 = tmp63 * tmp63
tmp65 = tmp60 + tmp64
tmp68 = tmp67 - tmp13
tmp69 = tmp68 * tmp68
tmp70 = tmp65 + tmp69
tmp73 = tmp72 - tmp19
tmp74 = tmp73 * tmp73
tmp75 = tmp70 + tmp74
tmp76 = tmp55 < tmp75
tmp77 = tmp55 == tmp75
tmp78 = tmp55 != tmp55
tmp79 = tmp75 != tmp75
tmp80 = tmp78 > tmp79
tmp81 = tmp76 | tmp80
tmp82 = tmp78 & tmp79
tmp83 = tmp77 | tmp82
tmp84 = tl.full([1], 2, tl.int64)
tmp85 = tmp56 < tmp84
tmp86 = tmp83 & tmp85
tmp87 = tmp81 | tmp86
tmp88 = tl.where(tmp87, tmp55, tmp75)
tmp89 = tl.where(tmp87, tmp56, tmp84)
tmp92 = tmp91 - tmp2
tmp93 = tmp92 * tmp92
tmp96 = tmp95 - tmp7
tmp97 = tmp96 * tmp96
tmp98 = tmp93 + tmp97
tmp101 = tmp100 - tmp13
tmp102 = tmp101 * tmp101
tmp103 = tmp98 + tmp102
tmp106 = tmp105 - tmp19
tmp107 = tmp106 * tmp106
tmp108 = tmp103 + tmp107
tmp109 = tmp88 < tmp108
tmp110 = tmp88 == tmp108
tmp111 = tmp88 != tmp88
tmp112 = tmp108 != tmp108
tmp113 = tmp111 > tmp112
tmp114 = tmp109 | tmp113
tmp115 = tmp111 & tmp112
tmp116 = tmp110 | tmp115
tmp117 = tl.full([1], 3, tl.int64)
tmp118 = tmp89 < tmp117
tmp119 = tmp116 & tmp118
tmp120 = tmp114 | tmp119
tmp121 = tl.where(tmp120, tmp88, tmp108)
tmp122 = tl.where(tmp120, tmp89, tmp117)
tl.store(out_ptr0 + (x0), tmp122, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ke/ckeed7ft5awwt6cgiivpt7j5svqkkbokukhvltl2gbosi67ikjtc.py
# Topologically Sorted Source Nodes: [q_data], Original ATen: [aten.index]
# Source node to ATen node mapping:
# q_data => index
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_2, [%argmin]), kwargs = {})
triton_poi_fused_index_1 = async_compile.triton('triton_poi_fused_index_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [delta, mul, dist, q_idx], Original ATen: [aten.sub, aten.mul, aten.sum, aten.argmin]
stream0 = get_raw_stream(0)
triton_poi_fused_argmin_mul_sub_sum_0.run(primals_2, primals_1, buf0, 4, grid=grid(4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_data], Original ATen: [aten.index]
triton_poi_fused_index_1.run(buf0, primals_2, buf1, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf1, buf0, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_argmin_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + 2)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + 3)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK])
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + 4)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp27 = tl.load(in_ptr0 + 5)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK])
tmp32 = tl.load(in_ptr0 + 6)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + 7)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK])
tmp57 = tl.load(in_ptr0 + 8)
tmp58 = tl.broadcast_to(tmp57, [XBLOCK])
tmp61 = tl.load(in_ptr0 + 9)
tmp62 = tl.broadcast_to(tmp61, [XBLOCK])
tmp66 = tl.load(in_ptr0 + 10)
tmp67 = tl.broadcast_to(tmp66, [XBLOCK])
tmp71 = tl.load(in_ptr0 + 11)
tmp72 = tl.broadcast_to(tmp71, [XBLOCK])
tmp90 = tl.load(in_ptr0 + 12)
tmp91 = tl.broadcast_to(tmp90, [XBLOCK])
tmp94 = tl.load(in_ptr0 + 13)
tmp95 = tl.broadcast_to(tmp94, [XBLOCK])
tmp99 = tl.load(in_ptr0 + 14)
tmp100 = tl.broadcast_to(tmp99, [XBLOCK])
tmp104 = tl.load(in_ptr0 + 15)
tmp105 = tl.broadcast_to(tmp104, [XBLOCK])
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp6 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp25 = tmp24 - tmp2
tmp26 = tmp25 * tmp25
tmp29 = tmp28 - tmp7
tmp30 = tmp29 * tmp29
tmp31 = tmp26 + tmp30
tmp34 = tmp33 - tmp13
tmp35 = tmp34 * tmp34
tmp36 = tmp31 + tmp35
tmp39 = tmp38 - tmp19
tmp40 = tmp39 * tmp39
tmp41 = tmp36 + tmp40
tmp42 = tmp22 < tmp41
tmp43 = tmp22 == tmp41
tmp44 = tmp22 != tmp22
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 0, tl.int64)
tmp51 = tl.full([1], 1, tl.int64)
tmp52 = tmp50 < tmp51
tmp53 = tmp49 & tmp52
tmp54 = tmp47 | tmp53
tmp55 = tl.where(tmp54, tmp22, tmp41)
tmp56 = tl.where(tmp54, tmp50, tmp51)
tmp59 = tmp58 - tmp2
tmp60 = tmp59 * tmp59
tmp63 = tmp62 - tmp7
tmp64 = tmp63 * tmp63
tmp65 = tmp60 + tmp64
tmp68 = tmp67 - tmp13
tmp69 = tmp68 * tmp68
tmp70 = tmp65 + tmp69
tmp73 = tmp72 - tmp19
tmp74 = tmp73 * tmp73
tmp75 = tmp70 + tmp74
tmp76 = tmp55 < tmp75
tmp77 = tmp55 == tmp75
tmp78 = tmp55 != tmp55
tmp79 = tmp75 != tmp75
tmp80 = tmp78 > tmp79
tmp81 = tmp76 | tmp80
tmp82 = tmp78 & tmp79
tmp83 = tmp77 | tmp82
tmp84 = tl.full([1], 2, tl.int64)
tmp85 = tmp56 < tmp84
tmp86 = tmp83 & tmp85
tmp87 = tmp81 | tmp86
tmp88 = tl.where(tmp87, tmp55, tmp75)
tmp89 = tl.where(tmp87, tmp56, tmp84)
tmp92 = tmp91 - tmp2
tmp93 = tmp92 * tmp92
tmp96 = tmp95 - tmp7
tmp97 = tmp96 * tmp96
tmp98 = tmp93 + tmp97
tmp101 = tmp100 - tmp13
tmp102 = tmp101 * tmp101
tmp103 = tmp98 + tmp102
tmp106 = tmp105 - tmp19
tmp107 = tmp106 * tmp106
tmp108 = tmp103 + tmp107
tmp109 = tmp88 < tmp108
tmp110 = tmp88 == tmp108
tmp111 = tmp88 != tmp88
tmp112 = tmp108 != tmp108
tmp113 = tmp111 > tmp112
tmp114 = tmp109 | tmp113
tmp115 = tmp111 & tmp112
tmp116 = tmp110 | tmp115
tmp117 = tl.full([1], 3, tl.int64)
tmp118 = tmp89 < tmp117
tmp119 = tmp116 & tmp118
tmp120 = tmp114 | tmp119
tl.where(tmp120, tmp88, tmp108)
tmp122 = tl.where(tmp120, tmp89, tmp117)
tl.store(out_ptr0 + x0, tmp122, xmask)
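# The fully unrolled kernel above is a pairwise argmin tournament over four
# squared-L2 distances; the (x != x) comparisons reproduce torch.argmin's
# NaN-propagation rules. Eager sketch of the reduction it replaces (which
# operand plays the codebook follows the primal wiring in forward below):
import torch
def nearest_row_sketch(a, b):
    # for each row of a, the index of the nearest row of b under squared L2
    dist = ((a.unsqueeze(1) - b.unsqueeze(0)) ** 2).sum(-1)
    return dist.argmin(dim=-1)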
@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_argmin_mul_sub_sum_0[grid(4)](primals_2, primals_1,
buf0, 4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_index_1[grid(16)](buf0, primals_2, buf1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, buf0, buf0
class QuantizingNew(nn.Module):
"""
    This is a quantizing layer: it maps each input vector to its nearest codebook entry.
"""
__initialized: 'bool' = True
def __init__(self, num_quantizing: 'int', quantizing_dim: 'int',
_weight: 'torch.Tensor'=None, initialize_by_dataset: 'bool'=True,
mean: 'float'=0.0, std: 'float'=1.0, dtype: 'torch.dtype'=None,
device: 'torch.device'=None):
super().__init__()
assert num_quantizing > 0
assert quantizing_dim > 0
self.num_quantizing = num_quantizing
self.quantizing_dim = quantizing_dim
self.initialize_by_dataset = initialize_by_dataset
self.mean, self.std = mean, std
if _weight is None:
self.weight = nn.Parameter(torch.empty(num_quantizing,
quantizing_dim, dtype=dtype, device=device))
nn.init.normal_(self.weight, mean=mean, std=std)
if initialize_by_dataset:
self.__initialized = False
self.__initialized_length = 0
else:
assert _weight.dim() == 2
assert _weight.size(0) == num_quantizing
assert _weight.size(1) == quantizing_dim
self.weight = nn.Parameter(_weight.to(device))
def from_idx(self, idx: 'torch.Tensor') ->torch.Tensor:
"""
        idx: integer tensor of shape (*,).
        return -> float tensor of shape (*, E), the codebook rows at idx.
"""
input_size = idx.shape
i = idx.view(-1)
q_data = self.weight[i].view(*input_size, self.quantizing_dim)
return q_data
    def load_state_dict(self, state_dict, strict: 'bool'=True):
self.__initialized = True
return super().load_state_dict(state_dict, strict=strict)
def __repr__(self):
s = f'Quantizing({self.num_quantizing}, {self.quantizing_dim})'
return s
def isInitialized(self) ->bool:
return self.__initialized
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
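# Minimal usage sketch for the compiled quantizer above (assumes CUDA and a
# randomly initialized 4x4 codebook, matching the traced shape asserts):
import torch
if __name__ == '__main__':
    q = QuantizingNew(num_quantizing=4, quantizing_dim=4,
        initialize_by_dataset=False, device='cuda')
    data = torch.rand(4, 4, device='cuda')
    q_data, q_idx = q(data)  # quantized vectors (4, 4) and indices (4,)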
| Geson-anko/VQ_AutoEncoder | Quantizing | false | 2,289 | ["MIT"] | 0 | 62e1694de38ea6f152891e19abc190ad4048e587 | https://github.com/Geson-anko/VQ_AutoEncoder/tree/62e1694de38ea6f152891e19abc190ad4048e587 |
h_swish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/lq/clqeppj4y7zg3pimgkc3jwu5feu5fyckp56vd63n4y2itru2coxs.py
# Topologically Sorted Source Nodes: [add, hardtanh, truediv, mul], Original ATen: [aten.add, aten.hardtanh, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# hardtanh => clamp_max, clamp_min
# mul => mul
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %div), kwargs = {})
triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, hardtanh, truediv, mul], Original ATen: [aten.add, aten.hardtanh, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
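# The kernel above is hard-swish, x * clamp(x + 3, 0, 6) / 6, with the divide
# by 6 folded into a multiply by 0.16666...; an eager one-liner it replaces:
import torch
def hard_swish_sketch(x):
    return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0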
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class h_sigmoid(nn.Module):
def __init__(self, inplace=True):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
def forward(self, x):
return self.relu(x + 3) / 6
class h_swishNew(nn.Module):
def __init__(self, inplace=True):
super(h_swishNew, self).__init__()
self.sigmoid = h_sigmoid(inplace=inplace)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| CYHYCY/voice-classification | h_swish | false | 17,071 | ["Apache-2.0"] | 8 | a6f62e2f1c39b08323da3632411f4ba6b04d5f37 | https://github.com/CYHYCY/voice-classification/tree/a6f62e2f1c39b08323da3632411f4ba6b04d5f37 |
Critic | import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
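# The kernel above implements torch.cat([state, action], dim=1) for the 4+4
# column layout: positions x0 < 4 read from the first input, the rest from the
# second. Eager sketch of the op it replaces (illustrative):
import torch
def cat_sketch(state, action):
    return torch.cat([state, action], dim=1)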
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (256, 8), (8, 1))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (256, 256), (256, 1))
assert_size_stride(primals_6, (256,), (1,))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (256, 8), (8, 1))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (256, 256), (256, 1))
assert_size_stride(primals_12, (256,), (1,))
assert_size_stride(primals_13, (1, 256), (256, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 256), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(1024)](buf2, primals_4, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (256, 256), (
1, 256), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(1024)](buf4, primals_6, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(256, 1), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 256), (1,
8), 0), out=buf7)
del primals_9
buf8 = buf7
del buf7
triton_poi_fused_relu_1[grid(1024)](buf8, primals_10, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_10
buf9 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (256, 256),
(1, 256), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_1[grid(1024)](buf10, primals_12, 1024, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_12
buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(
primals_13, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf12)
del primals_14
return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13,
primals_11, primals_7, primals_5)
class CriticNew(nn.Module):
def __init__(self, state_dim, action_dim):
super(CriticNew, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
def forward(self, input_0, input_1):
primals_3 = self.l1.weight
primals_4 = self.l1.bias
primals_5 = self.l2.weight
primals_6 = self.l2.bias
primals_7 = self.l3.weight
primals_8 = self.l3.bias
primals_9 = self.l4.weight
primals_10 = self.l4.bias
primals_11 = self.l5.weight
primals_12 = self.l5.bias
primals_13 = self.l6.weight
primals_14 = self.l6.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
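# Minimal usage sketch for the compiled twin-Q critic above (assumes CUDA;
# note that Q1 still runs the eager path on the same weights):
import torch
if __name__ == '__main__':
    critic = CriticNew(state_dim=4, action_dim=4).cuda()
    s = torch.rand(4, 4, device='cuda')
    a = torch.rand(4, 4, device='cuda')
    q1, q2 = critic(s, a)  # both (4, 1)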
| ChristianLin0420/DeepRL | Critic | false | 2,110 | ["MIT"] | 0 | 143a9bfebd264229d9d26fcdc070065225774e04 | https://github.com/ChristianLin0420/DeepRL/tree/143a9bfebd264229d9d26fcdc070065225774e04 |
FocalLoss | import torch
import torch.nn as nn
import torch.optim
class FocalLoss(torch.nn.Module):
"""Sigmoid focal cross entropy loss.
    Focal loss down-weights well-classified examples and focuses on the hard
    ones. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t)^gamma.
alpha: optional alpha weighting factor to balance positives vs negatives,
with alpha in [0, 1] for class 1 and 1-alpha for class 0.
In practice alpha may be set by inverse class frequency,
so that for a low number of positives, its weight is high.
"""
super(FocalLoss, self).__init__()
self._alpha = alpha
self._gamma = gamma
self.BCEWithLogits = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, prediction_tensor, target_tensor):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
per_entry_cross_ent = self.BCEWithLogits(prediction_tensor,
target_tensor)
prediction_probabilities = torch.sigmoid(prediction_tensor)
p_t = target_tensor * prediction_probabilities + (1 - target_tensor
) * (1 - prediction_probabilities)
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = target_tensor * self._alpha + (1 -
target_tensor) * (1 - self._alpha)
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return torch.mean(focal_cross_entropy_loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
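# Illustrative sanity check (a minimal sketch using the shapes from
# get_inputs(); runs on CPU). With gamma=0 and alpha=None both weighting
# factors collapse to 1, so the focal loss reduces to mean BCE-with-logits.
if __name__ == "__main__":
    logits, targets = get_inputs()
    loss = FocalLoss(gamma=2.0, alpha=0.25)(logits, targets)
    plain = FocalLoss(gamma=0.0, alpha=None)(logits, targets)
    bce = torch.nn.BCEWithLogitsLoss()(logits, targets)
    assert torch.allclose(plain, bce, atol=1e-6)
    print(float(loss), float(plain))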
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp0
tmp6 = tmp4 - tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp9 = tmp4 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 0.25
tmp12 = tmp0 * tmp11
tmp13 = 0.75
tmp14 = tmp5 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp10 * tmp15
tmp17 = tmp5 * tmp1
tmp18 = 0.0
tmp19 = triton_helpers.minimum(tmp18, tmp1)
tmp20 = tl_math.abs(tmp1)
tmp21 = -tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = libdevice.log1p(tmp22)
tmp24 = tmp19 - tmp23
tmp25 = tmp17 - tmp24
tmp26 = tmp16 * tmp25
tmp27 = tl.broadcast_to(tmp26, [RBLOCK])
tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0))
tmp30 = 256.0
tmp31 = tmp29 / tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[
grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class FocalLossNew(torch.nn.Module):
"""Sigmoid focal cross entropy loss.
    Focal loss down-weights well-classified examples and focuses on the hard
    examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t)^gamma.
alpha: optional alpha weighting factor to balance positives vs negatives,
with alpha in [0, 1] for class 1 and 1-alpha for class 0.
In practice alpha may be set by inverse class frequency,
        so that when positives are rare their weight is high.
"""
super(FocalLossNew, self).__init__()
self._alpha = alpha
self._gamma = gamma
self.BCEWithLogits = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
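# Minimal smoke test (illustrative; the fused kernel is CUDA-only and is
# specialised to the (4, 4, 4, 4) shape asserted in call):
if __name__ == "__main__" and torch.cuda.is_available():
    a = torch.rand(4, 4, 4, 4, device="cuda")
    b = torch.rand(4, 4, 4, 4, device="cuda")
    print(FocalLossNew()(a, b))  # scalar mean focal loss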
| ValerioB88/self-supervised-relational-reasoning | FocalLoss | false | 9,710 | [
"MIT"
] | 0 | 12692b93d5c8dd3f56a31aa8b790366556e7a621 | https://github.com/ValerioB88/self-supervised-relational-reasoning/tree/12692b93d5c8dd3f56a31aa8b790366556e7a621 |
SE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# out => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/o5/co5kpgkyaabh4nd7yz4gzpyl7x35mwdhgusbruykvtydzlq2lizg.py
# Topologically Sorted Source Nodes: [conv2d, out_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lp/clprvnh5p6cmadxtwzizwydrpjlwxohxixbw4ntucp6srbu6gtis.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# out_2 => sigmoid
# out_3 => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf5, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
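# Note: the four kernels above realise a squeeze-and-excitation block --
# global average pool over H and W, a 1x1 reduction conv with ReLU, a 1x1
# expansion conv, and a sigmoid gate multiplied back onto the input:
#     out = x * sigmoid(se2(relu(se1(mean(x, (2, 3), keepdim=True)))))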
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5
class SENew(nn.Module):
"""Squeeze-and-Excitation block."""
def __init__(self, in_planes, se_planes):
super(SENew, self).__init__()
self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True)
self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True)
def forward(self, input_0):
primals_2 = self.se1.weight
primals_3 = self.se1.bias
primals_4 = self.se2.weight
primals_5 = self.se2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
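# Hedged parity sketch (illustrative; CUDA-only, with shapes fixed by the
# asserts in call). The fused path should match the eager SE computation.
if __name__ == "__main__" and torch.cuda.is_available():
    se = SENew(in_planes=4, se_planes=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    pooled = x.mean((2, 3), keepdim=True)
    ref = x * torch.sigmoid(se.se2(torch.relu(se.se1(pooled))))
    assert torch.allclose(se(x), ref, atol=1e-5)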
| Geunwoo-Jeon/pytorch-cifar | SE | false | 2,344 | [
"MIT"
] | 0 | b06eeb65bbc0a4eccd124ed3c5367da70ab1ed20 | https://github.com/Geunwoo-Jeon/pytorch-cifar/tree/b06eeb65bbc0a4eccd124ed3c5367da70ab1ed20 |
GaussianKernel | import torch
from typing import Optional
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class GaussianKernel(nn.Module):
"""Gaussian Kernel Matrix
Gaussian Kernel k is defined by
.. math::
k(x_1, x_2) = \\exp \\left( - \\dfrac{\\| x_1 - x_2 \\|^2}{2\\sigma^2} \\right)
where :math:`x_1, x_2 \\in R^d` are 1-d tensors.
Gaussian Kernel Matrix K is defined on input group :math:`X=(x_1, x_2, ..., x_m),`
.. math::
K(X)_{i,j} = k(x_i, x_j)
Also by default, during training this layer keeps running estimates of the
mean of L2 distances, which are then used to set hyperparameter :math:`\\sigma`.
Mathematically, the estimation is :math:`\\sigma^2 = \\dfrac{\\alpha}{n^2}\\sum_{i,j} \\| x_i - x_j \\|^2`.
    If :attr:`track_running_stats` is set to ``False``, this layer does not
    keep running estimates and uses a fixed :math:`\\sigma` instead.
Parameters:
- sigma (float, optional): bandwidth :math:`\\sigma`. Default: None
- track_running_stats (bool, optional): If ``True``, this module tracks the running mean of :math:`\\sigma^2`.
        Otherwise, it won't track such statistics and always uses a fixed :math:`\\sigma^2`. Default: ``True``
- alpha (float, optional): :math:`\\alpha` which decides the magnitude of :math:`\\sigma^2` when track_running_stats is set to ``True``
Inputs:
- X (tensor): input group :math:`X`
Shape:
        - Inputs: :math:`(minibatch, F)` where F is the dimension of the input features.
- Outputs: :math:`(minibatch, minibatch)`
"""
def __init__(self, sigma: 'Optional[float]'=None, track_running_stats:
'Optional[bool]'=True, alpha: 'Optional[float]'=1.0):
super(GaussianKernel, self).__init__()
assert track_running_stats or sigma is not None
self.sigma_square = torch.tensor(sigma * sigma
) if sigma is not None else None
self.track_running_stats = track_running_stats
self.alpha = alpha
def forward(self, X: 'torch.Tensor') ->torch.Tensor:
l2_distance_square = ((X.unsqueeze(0) - X.unsqueeze(1)) ** 2).sum(2)
if self.track_running_stats:
self.sigma_square = self.alpha * torch.mean(l2_distance_square.
detach())
return torch.exp(-l2_distance_square / (2 * self.sigma_square))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
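# Illustrative check (a minimal sketch; runs on CPU). On a 2-D input the
# layer returns the full m x m kernel matrix; the diagonal is all ones since
# k(x, x) = exp(0) = 1.
if __name__ == "__main__":
    X = torch.rand(8, 16)
    K = GaussianKernel()(X)
    assert K.shape == (8, 8)
    assert torch.allclose(K.diagonal(), torch.ones(8), atol=1e-6)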
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from typing import Optional
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0(in_out_ptr0,
in_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16 % 4
r2 = rindex // 64
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tmp24 = 1.0
tmp25 = tmp23 * tmp24
tmp26 = -tmp18
tmp27 = 2.0
tmp28 = tmp25 * tmp27
tmp29 = tmp26 / tmp28
tmp30 = tl_math.exp(tmp29)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None)
tl.store(out_ptr1 + tl.broadcast_to(r3, [RBLOCK]), tmp30, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0[grid(1)](buf2,
arg0_1, buf3, 1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf3, buf2
class GaussianKernelNew(nn.Module):
"""Gaussian Kernel Matrix
Gaussian Kernel k is defined by
.. math::
k(x_1, x_2) = \\exp \\left( - \\dfrac{\\| x_1 - x_2 \\|^2}{2\\sigma^2} \\right)
where :math:`x_1, x_2 \\in R^d` are 1-d tensors.
Gaussian Kernel Matrix K is defined on input group :math:`X=(x_1, x_2, ..., x_m),`
.. math::
K(X)_{i,j} = k(x_i, x_j)
Also by default, during training this layer keeps running estimates of the
mean of L2 distances, which are then used to set hyperparameter :math:`\\sigma`.
Mathematically, the estimation is :math:`\\sigma^2 = \\dfrac{\\alpha}{n^2}\\sum_{i,j} \\| x_i - x_j \\|^2`.
    If :attr:`track_running_stats` is set to ``False``, this layer does not
    keep running estimates and uses a fixed :math:`\\sigma` instead.
Parameters:
- sigma (float, optional): bandwidth :math:`\\sigma`. Default: None
- track_running_stats (bool, optional): If ``True``, this module tracks the running mean of :math:`\\sigma^2`.
        Otherwise, it won't track such statistics and always uses a fixed :math:`\\sigma^2`. Default: ``True``
- alpha (float, optional): :math:`\\alpha` which decides the magnitude of :math:`\\sigma^2` when track_running_stats is set to ``True``
Inputs:
- X (tensor): input group :math:`X`
Shape:
        - Inputs: :math:`(minibatch, F)` where F is the dimension of the input features.
- Outputs: :math:`(minibatch, minibatch)`
"""
def __init__(self, sigma: 'Optional[float]'=None, track_running_stats:
'Optional[bool]'=True, alpha: 'Optional[float]'=1.0):
super(GaussianKernelNew, self).__init__()
assert track_running_stats or sigma is not None
self.sigma_square = torch.tensor(sigma * sigma
) if sigma is not None else None
self.track_running_stats = track_running_stats
self.alpha = alpha
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
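# Minimal smoke test (illustrative; CUDA-only, and the fused kernel is
# specialised to the (4, 4, 4, 4) shape from get_inputs):
if __name__ == "__main__" and torch.cuda.is_available():
    K = GaussianKernelNew()(torch.rand(4, 4, 4, 4, device="cuda"))
    print(K.shape)  # torch.Size([4, 4, 4, 4])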
| NiteshBharadwaj/ignoringhumanpose | GaussianKernel | false | 909 | [
"MIT"
] | 0 | 1fb7a063fded9cff18f7de4e1d71845983077256 | https://github.com/NiteshBharadwaj/ignoringhumanpose/tree/1fb7a063fded9cff18f7de4e1d71845983077256 |
BilinearConvLayer | import torch
def setup_conv(in_channels, out_channels, kernel_size, bias, padding_mode,
stride=1, Conv=torch.nn.Conv2d):
return Conv(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, padding=(kernel_size - 1) // 2, stride=
stride, bias=bias)
class BilinearConvLayer(torch.nn.Module):
def __init__(self, input_channels, output_channels, bilin_channels=None,
padding_mode='zeros', Conv=torch.nn.Conv2d, nonlinearity=torch.nn.
Identity(), norm=torch.nn.Identity(), kernel_size=3):
super(BilinearConvLayer, self).__init__()
bilin_channels = (output_channels if bilin_channels is None else
bilin_channels)
self.chgrp1 = max(0, output_channels - bilin_channels)
self.chgrp2 = bilin_channels
self.layer1 = setup_conv(in_channels=input_channels, out_channels=
self.chgrp1 + 2 * self.chgrp2, kernel_size=kernel_size, bias=
True, padding_mode=padding_mode, stride=1, Conv=Conv)
self.norm = norm
self.nonlinearity = nonlinearity
def forward(self, x):
y = self.nonlinearity(self.norm(self.layer1(x)))
mid = self.chgrp1 + self.chgrp2
y1, y2, y3 = y[:, :self.chgrp1], y[:, self.chgrp1:mid], y[:, mid:]
z = y2 * y3
out = torch.cat((y1, z), dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(512)](buf1, primals_2, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
def setup_conv(in_channels, out_channels, kernel_size, bias, padding_mode,
stride=1, Conv=torch.nn.Conv2d):
return Conv(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, padding=(kernel_size - 1) // 2, stride=
stride, bias=bias)
class BilinearConvLayerNew(torch.nn.Module):
def __init__(self, input_channels, output_channels, bilin_channels=None,
padding_mode='zeros', Conv=torch.nn.Conv2d, nonlinearity=torch.nn.
Identity(), norm=torch.nn.Identity(), kernel_size=3):
super(BilinearConvLayerNew, self).__init__()
bilin_channels = (output_channels if bilin_channels is None else
bilin_channels)
self.chgrp1 = max(0, output_channels - bilin_channels)
self.chgrp2 = bilin_channels
self.layer1 = setup_conv(in_channels=input_channels, out_channels=
self.chgrp1 + 2 * self.chgrp2, kernel_size=kernel_size, bias=
True, padding_mode=padding_mode, stride=1, Conv=Conv)
self.norm = norm
self.nonlinearity = nonlinearity
def forward(self, input_0):
primals_1 = self.layer1.weight
primals_2 = self.layer1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
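# Hedged parity sketch (illustrative; CUDA-only, shapes fixed to get_inputs):
# the fused conv + gating path should agree with slicing the eager conv.
if __name__ == "__main__" and torch.cuda.is_available():
    layer = BilinearConvLayerNew(input_channels=4, output_channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    y = layer.layer1(x)
    assert torch.allclose(layer(x), y[:, :4] * y[:, 4:], atol=1e-5)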
| m-dml/lil2021swe | BilinearConvLayer | false | 7,152 | [
"Apache-2.0"
] | 1 | 45352f214ec28c9f91dd24ed3669f492d8b68382 | https://github.com/m-dml/lil2021swe/tree/45352f214ec28c9f91dd24ed3669f492d8b68382 |
ToSEG | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/s3/cs3vevvlveudb7oguup5ljgcyslvygs2cnrc5347em4iypopundn.py
# Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_2 => mul_2
# weight => mul_3
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/r2/cr263a6gzji5hcuzutpzrubs2olns2ao2sa7aaaziqrb7stxhlqd.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out_3 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_6), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm]
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(primals_5, buf2, buf3, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf4, (1, 16, 4, 4), (256, 16, 4, 1))
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf5, primals_6, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (16, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 4, 1, 1), (16, 4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
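# Note: the graph above appears to be a StyleGAN2 ToRGB-style modulated 1x1
# convolution without demodulation -- the style vector goes through an
# equalised linear layer (weight * 1/sqrt(in_dim), bias * lr_mul), scales the
# shared conv weight per sample, and the batch is folded into the channel
# dimension so one grouped convolution (groups = batch) applies the
# per-sample weights; a per-channel bias is added at the end.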
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
float32)
triton_poi_fused_mul_2[grid(64)](primals_5, buf2, buf3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4,
1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf4, (1, 16, 4, 4), (256, 16, 4, 1))
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_add_3[grid(256)](buf5, primals_6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_6
return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (16,
4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4,
4), (256, 16, 4, 1), 0)
def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == 'cpu':
if bias is not None:
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return F.leaky_relu(input + bias.view(1, bias.shape[0], *
rest_dim), negative_slope=0.2) * scale
else:
return F.leaky_relu(input, negative_slope=0.2) * scale
else:
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
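# Illustrative note: make_kernel builds a normalised separable blur kernel
# from 1-D taps, e.g. make_kernel([1, 3, 3, 1]) returns a 4x4 tensor via the
# outer product, rescaled so its entries sum to 1.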
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-
pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == 'cpu':
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0],
pad[1], pad[0], pad[1])
else:
out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0
], pad[1], pad[0], pad[1]))
return out
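# Note: upfirdn2d implements the classic up-FIR-down resampling primitive --
# zero-insert upsample by `up`, pad, convolve with the 2-D FIR `kernel`, then
# subsample by `down`. The native fallback above spells this out with
# pad/conv2d; the CUDA path dispatches to the fused extension op.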
def upsample(in_tens, out_H=64):
in_H = in_tens.shape[2]
scale_factor = 1.0 * out_H / in_H
return nn.Upsample(scale_factor=scale_factor, mode='bilinear',
align_corners=False)(in_tens)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, bias, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
if bias:
grad_bias = grad_input.sum(dim).detach()
else:
grad_bias = empty
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
ctx.bias = bias is not None
if bias is None:
bias = empty
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale)
if not ctx.bias:
grad_bias = None
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
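# Hedged sketch of the grouped-convolution trick used in forward() above:
# reshaping the batch into the channel dimension and setting groups=batch
# applies a different (per-sample, style-modulated) filter bank to every
# sample in a single conv call.
def _grouped_conv_check():
    import torch
    from torch.nn import functional as F
    b, cin, cout, k = 2, 3, 5, 3
    x = torch.rand(b, cin, 8, 8)
    w = torch.rand(b, cout, cin, k, k)        # one filter bank per sample
    fast = F.conv2d(x.view(1, b * cin, 8, 8), w.view(b * cout, cin, k, k),
        padding=k // 2, groups=b).view(b, cout, 8, 8)
    slow = torch.stack([F.conv2d(x[i:i + 1], w[i], padding=k // 2)[0]
        for i in range(b)])
    assert torch.allclose(fast, slow, atol=1e-5)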
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = pad0, pad1
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=
self.pad)
return out
class ToSEGNew(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, upsample=True,
blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, out_channel, 1, style_dim,
demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input_0, input_1):
primals_6 = self.bias
primals_5 = self.conv.weight
primals_2 = self.conv.modulation.weight
primals_3 = self.conv.modulation.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
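# Hedged usage sketch (requires a CUDA device, since call() launches Triton
# kernels; the (4, 4, 4, 4) / (4, 4) shapes follow this record's test
# convention and are an assumption here):
def _toseg_smoke_test():
    import torch
    m = ToSEGNew(4, 4, 4).cuda()       # in_channel, out_channel, style_dim
    img = torch.rand(4, 4, 4, 4, device='cuda')
    style = torch.rand(4, 4, device='cuda')
    return m(img, style)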
| mfredriksz/semanticGAN_code | ToSEG | false | 16,055 | [
"BSD-2-Clause",
"MIT"
] | 107 | c6e7b490086afd8a7593e2892452295555910494 | https://github.com/mfredriksz/semanticGAN_code/tree/c6e7b490086afd8a7593e2892452295555910494 |
AvgPoolPad | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/pr/cprzlfpjjqlj6tudvbc455jxno35xlnta4wgmkbc6uo5zmcxii4s.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d]
# Source node to ATen node mapping:
# x => constant_pad_nd
# x_1 => avg_pool2d
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], False, False), kwargs = {})
triton_poi_fused_avg_pool2d_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 3) % 3
x0 = xindex % 3
x2 = (xindex // 9)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = (-2) + (2*x1)
tmp12 = tmp11 >= tmp1
tmp13 = (-2) + (2*x0)
tmp14 = tmp13 >= tmp1
tmp15 = tmp12 & tmp14
tmp16 = tmp15 & tmp10
tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = 2*x0
tmp21 = tmp20 >= tmp1
tmp22 = tmp20 < tmp3
tmp23 = tmp21 & tmp22
tmp24 = tmp5 & tmp23
tmp25 = tmp12 & tmp7
tmp26 = tmp25 & tmp24
tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp24, tmp27, tmp28)
tmp30 = tmp29 + tmp19
tmp31 = 1 + (2*x0)
tmp32 = tmp31 >= tmp1
tmp33 = tmp31 < tmp3
tmp34 = tmp32 & tmp33
tmp35 = tmp5 & tmp34
tmp36 = tmp12 & tmp21
tmp37 = tmp36 & tmp35
tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp35, tmp38, tmp39)
tmp41 = tmp40 + tmp30
tmp42 = 2*x1
tmp43 = tmp42 >= tmp1
tmp44 = tmp42 < tmp3
tmp45 = tmp43 & tmp44
tmp46 = tmp45 & tmp9
tmp47 = tmp2 & tmp14
tmp48 = tmp47 & tmp46
tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = tmp51 + tmp41
tmp53 = tmp45 & tmp23
tmp54 = tmp2 & tmp7
tmp55 = tmp54 & tmp53
tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
tmp58 = tl.where(tmp53, tmp56, tmp57)
tmp59 = tmp58 + tmp52
tmp60 = tmp45 & tmp34
tmp61 = tmp2 & tmp21
tmp62 = tmp61 & tmp60
tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0)
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp60, tmp63, tmp64)
tmp66 = tmp65 + tmp59
tmp67 = 1 + (2*x1)
tmp68 = tmp67 >= tmp1
tmp69 = tmp67 < tmp3
tmp70 = tmp68 & tmp69
tmp71 = tmp70 & tmp9
tmp72 = tmp43 & tmp14
tmp73 = tmp72 & tmp71
tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0)
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp71, tmp74, tmp75)
tmp77 = tmp76 + tmp66
tmp78 = tmp70 & tmp23
tmp79 = tmp43 & tmp7
tmp80 = tmp79 & tmp78
tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = tmp83 + tmp77
tmp85 = tmp70 & tmp34
tmp86 = tmp43 & tmp21
tmp87 = tmp86 & tmp85
tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0)
tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
tmp90 = tl.where(tmp85, tmp88, tmp89)
tmp91 = tmp90 + tmp84
tmp92 = (((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))) + (((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))*((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5))))
tmp93 = tmp91 / tmp92
tl.store(out_ptr0 + (x4), tmp93, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_constant_pad_nd_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = -2 + 2 * x1
tmp12 = tmp11 >= tmp1
tmp13 = -2 + 2 * x0
tmp14 = tmp13 >= tmp1
tmp15 = tmp12 & tmp14
tmp16 = tmp15 & tmp10
tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = 2 * x0
tmp21 = tmp20 >= tmp1
tmp22 = tmp20 < tmp3
tmp23 = tmp21 & tmp22
tmp24 = tmp5 & tmp23
tmp25 = tmp12 & tmp7
tmp26 = tmp25 & tmp24
tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp24, tmp27, tmp28)
tmp30 = tmp29 + tmp19
tmp31 = 1 + 2 * x0
tmp32 = tmp31 >= tmp1
tmp33 = tmp31 < tmp3
tmp34 = tmp32 & tmp33
tmp35 = tmp5 & tmp34
tmp36 = tmp12 & tmp21
tmp37 = tmp36 & tmp35
tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp35, tmp38, tmp39)
tmp41 = tmp40 + tmp30
tmp42 = 2 * x1
tmp43 = tmp42 >= tmp1
tmp44 = tmp42 < tmp3
tmp45 = tmp43 & tmp44
tmp46 = tmp45 & tmp9
tmp47 = tmp2 & tmp14
tmp48 = tmp47 & tmp46
tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = tmp51 + tmp41
tmp53 = tmp45 & tmp23
tmp54 = tmp2 & tmp7
tmp55 = tmp54 & tmp53
tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
tmp58 = tl.where(tmp53, tmp56, tmp57)
tmp59 = tmp58 + tmp52
tmp60 = tmp45 & tmp34
tmp61 = tmp2 & tmp21
tmp62 = tmp61 & tmp60
tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
xmask, eviction_policy='evict_last', other=0.0)
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp60, tmp63, tmp64)
tmp66 = tmp65 + tmp59
tmp67 = 1 + 2 * x1
tmp68 = tmp67 >= tmp1
tmp69 = tmp67 < tmp3
tmp70 = tmp68 & tmp69
tmp71 = tmp70 & tmp9
tmp72 = tmp43 & tmp14
tmp73 = tmp72 & tmp71
tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
xmask, eviction_policy='evict_last', other=0.0)
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp71, tmp74, tmp75)
tmp77 = tmp76 + tmp66
tmp78 = tmp70 & tmp23
tmp79 = tmp43 & tmp7
tmp80 = tmp79 & tmp78
tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
xmask, eviction_policy='evict_last', other=0.0)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = tmp83 + tmp77
tmp85 = tmp70 & tmp34
tmp86 = tmp43 & tmp21
tmp87 = tmp86 & tmp85
tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
tmp90 = tl.where(tmp85, tmp88, tmp89)
tmp91 = tmp90 + tmp84
tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (
0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 *
(5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 +
2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 *
x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) +
(2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + (
-1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 *
x0) * (2 + 2 * x0 < 5))
tmp93 = tmp91 / tmp92
tl.store(out_ptr0 + x4, tmp93, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1,
buf0, 144, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4),
class AvgPoolPadNew(nn.Module):
def __init__(self, stride=2, padding=1):
super(AvgPoolPadNew, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
count_include_pad=False)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
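# Hedged parity sketch (CUDA only): the fused kernel should reproduce the
# eager pipeline ZeroPad2d((1, 0, 1, 0)) -> AvgPool2d(3, stride=2,
# padding=1, count_include_pad=False) followed by dropping the first output
# row/column, which is what the strided reinterpret_tensor in call() does.
def _avg_pool_pad_parity():
    import torch
    import torch.nn as nn
    x = torch.rand(4, 4, 4, 4, device='cuda')
    pooled = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)(
        nn.ZeroPad2d((1, 0, 1, 0))(x))
    assert torch.allclose(pooled[:, :, 1:, 1:], AvgPoolPadNew()(x), atol=1e-6)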
| dowhilefalse/DeOldify | AvgPoolPad | false | 12,305 | [
"MIT"
] | 0 | 08f012cdbe36e3f8482460f57e1844b361a7fb16 | https://github.com/dowhilefalse/DeOldify/tree/08f012cdbe36e3f8482460f57e1844b361a7fb16 |
ZeroConv2d | import torch
from torch import nn
from torch.nn import functional as F
class ZeroConv2d(nn.Module):
def __init__(self, in_channel, out_channel, padding=1):
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0)
self.conv.weight.data.zero_()
self.conv.bias.data.zero_()
self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input):
out = F.pad(input, [1, 1, 1, 1], value=1)
out = self.conv(out)
out = out * torch.exp(self.scale * 3)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4}]
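# Hedged sanity sketch: because the conv weight and bias are zero-initialized,
# the first forward pass returns exactly zeros no matter the input (exp(3 *
# scale) is 1 at init), the usual Glow-style zero initialization.
def _zero_conv_init_check():
    import torch
    m = ZeroConv2d(4, 4)
    y = m(torch.rand(2, 4, 8, 8))
    assert torch.count_nonzero(y) == 0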
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=1.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_exp_mul_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_exp_mul_1[grid(256)](buf2, primals_3,
primals_4, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf3, primals_2, primals_4, buf0, buf2
class ZeroConv2dNew(nn.Module):
def __init__(self, in_channel, out_channel, padding=1):
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0)
self.conv.weight.data.zero_()
self.conv.bias.data.zero_()
self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input_0):
primals_4 = self.scale
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| mbaddar1/glow-pytorch | ZeroConv2d | false | 7,182 | [
"MIT"
] | 1 | e07ca542ce4dd93ddf680c51eda25d1f9db252a1 | https://github.com/mbaddar1/glow-pytorch/tree/e07ca542ce4dd93ddf680c51eda25d1f9db252a1 |
StyledConv | import math
import torch
from torch import nn
import torch.utils.checkpoint
from torch.nn import functional as F
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
rest_dim = [1] * (input.ndim - bias.ndim - 1)
if input.ndim == 3:
return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]),
negative_slope=negative_slope) * scale
else:
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=negative_slope) * scale
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-
pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1
], pad[0], pad[1])
return out
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self
.scale)
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class StyledConv(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True):
super().__init__()
self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size,
style_dim, upsample=upsample, blur_kernel=blur_kernel,
demodulate=demodulate)
self.noise = NoiseInjection()
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input, style, noise=None):
out = self.conv(input, style)
out = self.noise(out, noise=noise)
out = self.activate(out)
return out
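# Hedged sketch of the demodulation step inside ModulatedConv2d.forward:
# after multiplying by rsqrt(sum(w^2) + eps), every per-sample output filter
# has (almost exactly) unit L2 norm, which is what keeps activations near
# unit variance in StyleGAN2.
def _demodulation_check():
    import torch
    w = torch.rand(2, 5, 3, 3, 3)             # (batch, out_ch, in_ch, k, k)
    demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-08)
    w = w * demod.view(2, 5, 1, 1, 1)
    norms = w.pow(2).sum([2, 3, 4]).sqrt()
    assert torch.allclose(norms, torch.ones_like(norms), atol=1e-4)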
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4,
'style_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
import torch.utils.checkpoint
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r5 = rindex
x0 = xindex % 4
r3 = rindex // 16
x1 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 1e-08
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp4 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp12, xmask)
tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 25
x2 = xindex // 100
x1 = xindex // 25 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + (x0 + 25 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = 1.4142135623730951
tmp14 = tmp12 * tmp13
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = buf0
del buf0
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5,
buf2, buf5, 16, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4,
4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1))
buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32)
buf8 = torch.ops.aten.normal_functional.default(buf7)
del buf7
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32
)
triton_poi_fused_add_leaky_relu_mul_3[grid(400)](buf6, primals_6,
buf9, primals_7, buf10, buf11, 400, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_6
del primals_7
return buf11, primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5,
(16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), buf9, buf10
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
rest_dim = [1] * (input.ndim - bias.ndim - 1)
if input.ndim == 3:
return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]),
negative_slope=negative_slope) * scale
else:
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=negative_slope) * scale
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-
pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1
], pad[0], pad[1])
return out
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self
.scale)
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class StyledConvNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True):
super().__init__()
self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size,
style_dim, upsample=upsample, blur_kernel=blur_kernel,
demodulate=demodulate)
self.noise = NoiseInjection()
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input_0, input_1):
primals_5 = self.conv.weight
primals_2 = self.conv.modulation.weight
primals_3 = self.conv.modulation.bias
primals_6 = self.noise.weight
primals_7 = self.activate.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Dokhyam/StyleCLIP | StyledConv | false | 9,170 | [
"MIT"
] | 0 | 3953c6fda14672762897d3ee16c0458dc848c21d | https://github.com/Dokhyam/StyleCLIP/tree/3953c6fda14672762897d3ee16c0458dc848c21d |
BiInteractionPooling | import torch
import torch.nn as nn
import torch.utils.data
class BiInteractionPooling(nn.Module):
def __init__(self):
super(BiInteractionPooling, self).__init__()
def forward(self, inputs):
concated_embeds_value = inputs
square_of_sum = torch.pow(torch.sum(concated_embeds_value, dim=1,
keepdim=True), 2)
sum_of_square = torch.sum(concated_embeds_value *
concated_embeds_value, dim=1, keepdim=True)
cross_term = 0.5 * (square_of_sum - sum_of_square)
return cross_term
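# A worked identity behind the pooling above (illustrative sketch): for
# field embeddings e_1 .. e_n,
#   0.5 * ((sum_i e_i)^2 - sum_i e_i^2) = sum_{i<j} e_i * e_j,
# so the sum of all pairwise element-wise interactions is computed in O(n)
# instead of O(n^2).
def _bi_interaction_identity():
    e = torch.rand(4, 3, 8)                   # (batch, num_fields, embed_dim)
    fast = 0.5 * (e.sum(dim=1) ** 2 - (e * e).sum(dim=1))
    slow = sum(e[:, i] * e[:, j] for i in range(3) for j in range(i + 1, 3))
    assert torch.allclose(fast, slow, atol=1e-6)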
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp0 * tmp0
tmp9 = tmp1 * tmp1
tmp10 = tmp8 + tmp9
tmp11 = tmp3 * tmp3
tmp12 = tmp10 + tmp11
tmp13 = tmp5 * tmp5
tmp14 = tmp12 + tmp13
tmp15 = tmp7 - tmp14
tmp16 = 0.5
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0[grid(64)](arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class BiInteractionPoolingNew(nn.Module):
def __init__(self):
super(BiInteractionPoolingNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
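# Hedged parity sketch (CUDA only): the fused Triton kernel should match the
# eager square-of-sum minus sum-of-square formulation over dim=1.
def _bi_pool_parity():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = 0.5 * (x.sum(1, keepdim=True) ** 2 - (x * x).sum(1, keepdim=True))
    assert torch.allclose(ref, BiInteractionPoolingNew()(x), atol=1e-5)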
| Holldean/pytorch-models | BiInteractionPooling | false | 2,342 | [
"MIT"
] | 0 | 9509d0d462b1a98164b266d49ada199071a855ac | https://github.com/Holldean/pytorch-models/tree/9509d0d462b1a98164b266d49ada199071a855ac |
MaxPoolStride1 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/hx/chx5m6qxrcu6wal56js3crjy4s6tfrcj5rpafrisgnvm7f2fknk4.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (x1)) + (x1) * ((x1) < (3)))) + (16*x2) + ((3) * ((3) <= (x0)) + (x0) * ((x0) < (3)))), xmask)
tmp1 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (x1)) + (x1) * ((x1) < (3)))) + (16*x2) + ((3) * ((3) <= (1 + x0)) + (1 + x0) * ((1 + x0) < (3)))), xmask)
tmp3 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (1 + x1)) + (1 + x1) * ((1 + x1) < (3)))) + (16*x2) + ((3) * ((3) <= (x0)) + (x0) * ((x0) < (3)))), xmask)
tmp5 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (1 + x1)) + (1 + x1) * ((1 + x1) < (3)))) + (16*x2) + ((3) * ((3) <= (1 + x0)) + (1 + x0) * ((1 + x0) < (3)))), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= x1) + x1 * (x1 < 3)) + 16 * x2 +
(3 * (3 <= x0) + x0 * (x0 < 3))), xmask)
tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= x1) + x1 * (x1 < 3)) + 16 * x2 +
(3 * (3 <= 1 + x0) + (1 + x0) * (1 + x0 < 3))), xmask)
tmp3 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + x1) + (1 + x1) * (1 + x1 <
3)) + 16 * x2 + (3 * (3 <= x0) + x0 * (x0 < 3))), xmask)
tmp5 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + x1) + (1 + x1) * (1 + x1 <
3)) + 16 * x2 + (3 * (3 <= 1 + x0) + (1 + x0) * (1 + x0 < 3))), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MaxPoolStride1New(nn.Module):
def __init__(self):
super(MaxPoolStride1New, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
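# Hedged parity sketch (CUDA only): the clamped indexing in the kernel above
# amounts to replicate-padding the right/bottom edge by one pixel and taking
# a 2x2 max pool with stride 1, which preserves the spatial size.
def _maxpool_stride1_parity():
    import torch
    from torch.nn import functional as F
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
    assert torch.allclose(ref, MaxPoolStride1New()(x))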
| Abdul-Mukit/ssp_with_hand_tracking | MaxPoolStride1 | false | 11,155 | [
"MIT"
] | 0 | 04429ac9789283694a9176b94f70ab4e5a8c0727 | https://github.com/Abdul-Mukit/ssp_with_hand_tracking/tree/04429ac9789283694a9176b94f70ab4e5a8c0727 |
HFM | import torch
import torch.nn as nn
class HFM(nn.Module):
def __init__(self, k=2):
super().__init__()
self.k = k
self.net = nn.Sequential(nn.AvgPool2d(kernel_size=self.k, stride=
self.k), nn.Upsample(scale_factor=self.k, mode='nearest'))
def forward(self, tL):
        assert tL.shape[2] % self.k == 0 and tL.shape[3] % self.k == 0, 'h, w must be divisible by k'
return tL - self.net(tL)
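# Hedged sketch: self.net is a low-pass branch (k x k average pooling, then
# nearest-neighbor upsampling back to the input size), so the subtraction
# keeps only the high-frequency residual; a constant image maps to zero.
def _hfm_constant_check():
    m = HFM(k=2)
    x = torch.ones(1, 3, 8, 8)
    assert torch.allclose(m(x), torch.zeros_like(x))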
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_avg_pool2d_sub_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = x1
tmp2 = tmp1.to(tl.float32)
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4.to(tl.int32)
tmp6 = x0
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp7 * tmp3
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.load(in_ptr0 + (2 * tmp9 + 8 * tmp5 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (1 + 2 * tmp9 + 8 * tmp5 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tmp11 + tmp10
tmp13 = tl.load(in_ptr0 + (4 + 2 * tmp9 + 8 * tmp5 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp14 = tmp13 + tmp12
tmp15 = tl.load(in_ptr0 + (5 + 2 * tmp9 + 8 * tmp5 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp16 = tmp15 + tmp14
tmp17 = 0.25
tmp18 = tmp16 * tmp17
tmp19 = tmp0 - tmp18
tl.store(out_ptr0 + x3, tmp19, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_avg_pool2d_sub_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HFMNew(nn.Module):
def __init__(self, k=2):
super().__init__()
self.k = k
self.net = nn.Sequential(nn.AvgPool2d(kernel_size=self.k, stride=
self.k), nn.Upsample(scale_factor=self.k, mode='nearest'))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| YingqiLiulll/scrips_for_SR | HFM | false | 1,251 | [
"MIT"
] | 0 | 04fa6fdaf157e913d3e2521cd80315a10a2ccedc | https://github.com/YingqiLiulll/scrips_for_SR/tree/04fa6fdaf157e913d3e2521cd80315a10a2ccedc |
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/w7/cw766c4uxduz3dqmxkdfvgucfoooblkc54km6jr3q5kk3krunkze.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/yh/cyhf6bhaqimi2pucos5fnrpvhrt4vuaetbxnooyr5pvgjt7s6fgo.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
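def _demo_sdpa_new():
    # Hedged usage sketch, not from the upstream repo. Note that the
    # 1/temperature scale (0.25, i.e. temperature=4) was baked into the
    # fused softmax kernel at trace time, so the constructor argument no
    # longer influences the compiled path. Inputs are three (B, T, D)
    # tensors; the second return value is the attention matrix.
    m = ScaledDotProductAttentionNew(temperature=4.0)
    a, b, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
    out, attn = m(a, b, v)
    assert out.shape == (4, 4, 4)
    # Each attention row is a probability distribution over keys.
    assert torch.allclose(attn.sum(-1), torch.ones(4, 4, device='cuda'),
        atol=1e-5)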
| ArrowLuo/GRACE | ScaledDotProductAttention | false | 7,744 | [
"Apache-2.0"
] | 17 | f27b500ba905685c03eee6d91d87adc9ef78b4d1 | https://github.com/ArrowLuo/GRACE/tree/f27b500ba905685c03eee6d91d87adc9ef78b4d1 |
IndepAnisotropicGaussianUVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/2f/c2fihp3eabdoclhz6gdz723nsdjyue5ykxbe3cdbfc2itfhvb5zw.py
# Topologically Sorted Source Nodes: [softplus, sigma2, pow_1, pow_2, r_sqnorm2, add_4, denom2, log, add_5, delta_u, pow_3, delta_v, pow_4, delta_sqnorm, truediv, add_6, delta_u_r_u, delta_v_r_v, delta_r, delta_r_sqnorm, truediv_1, sub_2, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.pow, aten.mul, aten.log, aten.sub, aten.div, aten.sum]
# Source node to ATen node mapping:
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# delta_r => add_3
# delta_r_sqnorm => pow_5
# delta_sqnorm => add_2
# delta_u => sub
# delta_u_r_u => mul
# delta_v => sub_1
# delta_v_r_v => mul_1
# denom2 => mul_2
# log => log
# loss => mul_3
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# r_sqnorm2 => add_1
# sigma2 => add
# softplus => exp, gt, log1p, where
# sub_2 => sub_2
# sum_1 => sum_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 4), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg2_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %add_1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_4), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mul_2,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, 1.8378770664093453), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg4_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg5_1, %arg6_1), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %pow_4), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %add), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %div), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg2_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_3, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_5, %mul_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %div_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=(8,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 7, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp8 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr2 + (r0), None)
tmp18 = tl.load(in_ptr3 + (r0), None)
tmp19 = tl.load(in_ptr4 + (r0), None)
tmp22 = tl.load(in_ptr5 + (r0), None)
tmp23 = tl.load(in_ptr6 + (r0), None)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 4.0
tmp7 = tmp5 + tmp6
tmp9 = tmp8 * tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp7 * tmp13
tmp15 = tl_math.log(tmp14)
tmp16 = 1.8378770664093453
tmp17 = tmp15 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp21 + tmp25
tmp27 = tmp26 / tmp7
tmp28 = tmp17 + tmp27
tmp29 = tmp20 * tmp8
tmp30 = tmp24 * tmp10
tmp31 = tmp29 + tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp32 / tmp14
tmp34 = tmp28 - tmp33
tmp35 = 0.5
tmp36 = tmp34 * tmp35
tmp37 = tl.broadcast_to(tmp36, [RBLOCK])
tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0))
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp39, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [softplus, sigma2, pow_1, pow_2, r_sqnorm2, add_4, denom2, log, add_5, delta_u, pow_3, delta_v, pow_4, delta_sqnorm, truediv, add_6, delta_u_r_u, delta_v_r_v, delta_r, delta_r_sqnorm, truediv_1, sub_2, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.pow, aten.mul, aten.log, aten.sub, aten.div, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0.run(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, buf1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg5_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg6_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp8 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + r0, None)
tmp18 = tl.load(in_ptr3 + r0, None)
tmp19 = tl.load(in_ptr4 + r0, None)
tmp22 = tl.load(in_ptr5 + r0, None)
tmp23 = tl.load(in_ptr6 + r0, None)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 4.0
tmp7 = tmp5 + tmp6
tmp9 = tmp8 * tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp7 * tmp13
tmp15 = tl_math.log(tmp14)
tmp16 = 1.8378770664093453
tmp17 = tmp15 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp21 + tmp25
tmp27 = tmp26 / tmp7
tmp28 = tmp17 + tmp27
tmp29 = tmp20 * tmp8
tmp30 = tmp24 * tmp10
tmp31 = tmp29 + tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp32 / tmp14
tmp34 = tmp28 - tmp33
tmp35 = 0.5
tmp36 = tmp34 * tmp35
tmp37 = tl.broadcast_to(tmp36, [RBLOCK])
tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0))
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp39, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1
, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, buf1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
return buf1,
class IndepAnisotropicGaussianUVLossNew(nn.Module):
"""
Loss for the case of independent residuals with anisotropic covariances:
$Sigma_i = sigma_i^2 I + r_i r_i^T$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi)
+ log sigma_i^2 (sigma_i^2 + ||r_i||^2)
+ ||delta_i||^2 / sigma_i^2
- <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: 'float'):
super(IndepAnisotropicGaussianUVLossNew, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
input_6):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
arg5_1 = input_5
arg6_1 = input_6
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1])
return output[0]
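def _demo_uv_loss_new():
    # Hedged sketch, not from the upstream repo. Going by the kernel's
    # dataflow the seven inputs are (sigma_raw, r_u, r_v, u, target_u, v,
    # target_v); the sigma lower bound of 4.0 and the log(2*pi) constant
    # were baked into the fused reduction at trace time.
    loss_fn = IndepAnisotropicGaussianUVLossNew(sigma_lower_bound=4.0)
    args = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(7)]
    loss = loss_fn(*args)
    assert loss.ndim == 0
    assert torch.isfinite(loss)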
| FluteXu/DW-Research | IndepAnisotropicGaussianUVLoss | false | 13,692 | [
"Apache-2.0"
] | 780 | 6b559d2d1d440c07e5936a65cd74a3bc657962dc | https://github.com/FluteXu/DW-Research/tree/6b559d2d1d440c07e5936a65cd74a3bc657962dc |
Lagrange | import torch
import torch.nn as nn
import torch.utils.data
def objective(x, h):
return torch.log(1 + torch.sum(x * h, dim=1))
class Lagrange(nn.Module):
def __init__(self):
super(Lagrange, self).__init__()
def forward(self, approx, dual, h):
result = -objective(approx, h) + dual
return torch.mean(result)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
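def _demo_objective():
    # Illustrative sketch, not from the upstream repo: with x = h = ones
    # of shape (2, 3), sum(x * h, dim=1) is 3, so the objective is
    # log(1 + 3) = log(4) per row.
    x = torch.ones(2, 3)
    h = torch.ones(2, 3)
    assert torch.allclose(objective(x, h), torch.log(torch.full((2,), 4.0)))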
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_log_mul_neg_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tl_math.log(tmp16)
tmp18 = -tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_per_fused_add_log_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + r2, None)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 256.0
tmp7 = tmp5 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_log_mul_neg_sum_0[grid(64)](arg0_1, arg1_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_log_mean_mul_neg_sum_1[grid(1)](buf2, buf0,
arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
def objective(x, h):
return torch.log(1 + torch.sum(x * h, dim=1))
class LagrangeNew(nn.Module):
def __init__(self):
super(LagrangeNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
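def _check_lagrange_new():
    # Hedged equivalence sketch (assumes a CUDA device): the fused kernels
    # should reproduce the eager mean(-objective(approx, h) + dual),
    # including the broadcast of the (4, 4, 4) objective over dual's
    # leading dimension.
    approx, dual, h = (torch.rand(4, 4, 4, 4, device='cuda') for _ in
        range(3))
    ref = torch.mean(-objective(approx, h) + dual)
    out = LagrangeNew()(approx, dual, h)
    assert torch.allclose(out, ref, atol=1e-6)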
| goldenBill/Power_Control | Lagrange | false | 10,099 | [
"MIT"
] | 0 | 8218aaffe8d5c69da454f76ecdacce46340cb81c | https://github.com/goldenBill/Power_Control/tree/8218aaffe8d5c69da454f76ecdacce46340cb81c |
WideResNet | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
m = 2
def __init__(self, in_planes, out_planes, stride, dropout, fixup_l,
fixup_coeff):
super(BasicBlock, self).__init__()
self._dropout = dropout
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride
=stride, padding=1, bias=False)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.equalInOut = in_planes == out_planes
self.conv_res = nn.Conv2d(in_planes, out_planes, kernel_size=1,
stride=stride, padding=0, bias=False)
self.conv_res = not self.equalInOut and self.conv_res or None
self.scale = nn.Parameter(torch.ones(1))
self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in
range(4)])
k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1
] * self.conv1.out_channels
self.conv1.weight.data.normal_(0, fixup_coeff * fixup_l ** (-1 / (2 *
self.m - 2)) * math.sqrt(2.0 / k))
self.conv2.weight.data.zero_()
if self.conv_res is not None:
k = self.conv_res.kernel_size[0] * self.conv_res.kernel_size[1
] * self.conv_res.out_channels
self.conv_res.weight.data.normal_(0, math.sqrt(2.0 / k))
def forward(self, x):
x_out = self.relu(x + self.biases[0])
out = self.conv1(x_out) + self.biases[1]
out = self.relu(out) + self.biases[2]
if self._dropout > 0:
out = F.dropout(out, p=self._dropout, training=self.training)
out = self.scale * self.conv2(out) + self.biases[3]
if self.equalInOut:
return torch.add(x, out)
return torch.add(self.conv_res(x_out), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride,
dropout, fixup_l, fixup_coeff):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes,
nb_layers, stride, dropout, fixup_l, fixup_coeff)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
dropout, fixup_l, fixup_coeff):
layers = []
for i in range(int(nb_layers)):
_in_planes = i == 0 and in_planes or out_planes
_stride = i == 0 and stride or 1
layers.append(block(_in_planes, out_planes, _stride, dropout=
dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropout=0.0,
fixup_coeff=1):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 *
widen_factor]
assert (depth - 4) % 6 == 0, 'You need to change the number of layers'
n = (depth - 4) / 6
block = BasicBlock
fixup_l = n * 3
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1,
dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff)
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2,
dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff)
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2,
dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff)
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
self.fc.bias.data.zero_()
self.fc.weight.data.zero_()
k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1
] * self.conv1.out_channels
self.conv1.weight.data.normal_(0, math.sqrt(2.0 / k))
self.bias1 = nn.Parameter(torch.zeros(1))
self.bias2 = nn.Parameter(torch.zeros(1))
def forward(self, x):
out = self.conv1(x) + self.bias1
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(out)
out = F.adaptive_avg_pool2d(out, 1)
out = out.view(-1, self.nChannels)
return self.fc(out + self.bias2)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'depth': 4, 'num_classes': 4}]
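def _demo_wideresnet():
    # Illustrative sketch, not from the upstream repo: (depth - 4) must be
    # divisible by 6. With depth=4 every residual block is empty, so the
    # net reduces to conv1 -> ReLU -> global average pool -> fc; the
    # view(-1, 64) then folds the 4 x 16 pooled activations of a batch of
    # four into a single row, so this degenerate config emits one logit
    # row rather than four.
    net = WideResNet(depth=4, num_classes=4)
    logits = net(torch.rand(4, 3, 64, 64))
    assert logits.shape == (1, 4)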
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_add_mean_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 64
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
_tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1, 1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = _tmp7 + tmp6
_tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
tmp9 = 0.0
tmp10 = tmp5 <= tmp9
tl.store(out_ptr0 + (r1 + 4096 * x0), tmp10, rmask & xmask)
tmp7 = tl.sum(_tmp7, 1)[:, None]
tmp13 = tl.load(in_ptr2 + 0)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1])
tmp11 = 4096.0
tmp12 = tmp7 / tmp11
tmp15 = tmp12 + tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 64), (64, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32
)
buf4 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1),
torch.bool)
buf2 = reinterpret_tensor(buf1, (1, 64), (64, 1), 0)
del buf1
get_raw_stream(0)
triton_red_fused_add_mean_relu_threshold_backward_0[grid(64)](buf2,
buf0, primals_3, primals_4, buf4, 64, 4096, XBLOCK=1, RBLOCK=
2048, num_warps=16, num_stages=1)
del buf0
del primals_3
del primals_4
buf3 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(64, 4), (1, 64), 0), alpha=1, beta=1, out=buf3)
del primals_6
return buf3, primals_1, primals_2, buf2, primals_5, buf4
class BasicBlock(nn.Module):
m = 2
def __init__(self, in_planes, out_planes, stride, dropout, fixup_l,
fixup_coeff):
super(BasicBlock, self).__init__()
self._dropout = dropout
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride
=stride, padding=1, bias=False)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.equalInOut = in_planes == out_planes
self.conv_res = nn.Conv2d(in_planes, out_planes, kernel_size=1,
stride=stride, padding=0, bias=False)
self.conv_res = not self.equalInOut and self.conv_res or None
self.scale = nn.Parameter(torch.ones(1))
self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in
range(4)])
k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1
] * self.conv1.out_channels
self.conv1.weight.data.normal_(0, fixup_coeff * fixup_l ** (-1 / (2 *
self.m - 2)) * math.sqrt(2.0 / k))
self.conv2.weight.data.zero_()
if self.conv_res is not None:
k = self.conv_res.kernel_size[0] * self.conv_res.kernel_size[1
] * self.conv_res.out_channels
self.conv_res.weight.data.normal_(0, math.sqrt(2.0 / k))
def forward(self, x):
x_out = self.relu(x + self.biases[0])
out = self.conv1(x_out) + self.biases[1]
out = self.relu(out) + self.biases[2]
if self._dropout > 0:
out = F.dropout(out, p=self._dropout, training=self.training)
out = self.scale * self.conv2(out) + self.biases[3]
if self.equalInOut:
return torch.add(x, out)
return torch.add(self.conv_res(x_out), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride,
dropout, fixup_l, fixup_coeff):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes,
nb_layers, stride, dropout, fixup_l, fixup_coeff)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
dropout, fixup_l, fixup_coeff):
layers = []
for i in range(int(nb_layers)):
_in_planes = i == 0 and in_planes or out_planes
_stride = i == 0 and stride or 1
layers.append(block(_in_planes, out_planes, _stride, dropout=
dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNetNew(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropout=0.0,
fixup_coeff=1):
super(WideResNetNew, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 *
widen_factor]
assert (depth - 4) % 6 == 0, 'You need to change the number of layers'
n = (depth - 4) / 6
block = BasicBlock
fixup_l = n * 3
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1,
dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff)
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2,
dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff)
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2,
dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff)
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
self.fc.bias.data.zero_()
self.fc.weight.data.zero_()
k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1
] * self.conv1.out_channels
self.conv1.weight.data.normal_(0, math.sqrt(2.0 / k))
self.bias1 = nn.Parameter(torch.zeros(1))
self.bias2 = nn.Parameter(torch.zeros(1))
def forward(self, input_0):
primals_3 = self.bias1
primals_4 = self.bias2
primals_1 = self.conv1.weight
primals_5 = self.fc.weight
primals_6 = self.fc.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
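def _check_wideresnet_new():
    # Hedged sketch (assumes a CUDA device): the compiled path fuses the
    # bias add, ReLU and global average pooling into a single Triton
    # reduction and leaves the classifier to cuBLAS addmm; for depth=4 it
    # preserves the eager model's degenerate (1, 4) output shape.
    net = WideResNetNew(depth=4, num_classes=4).cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    assert net(x).shape == (1, 4)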
| PavelOstyakov/pipeline | WideResNet | false | 14,167 | [
"MIT"
] | 214 | 236c050af3be9dbb534e959589040e9433501e2b | https://github.com/PavelOstyakov/pipeline/tree/236c050af3be9dbb534e959589040e9433501e2b |
ComplexLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/k7/ck7eabcc7dltczcwxevz2ehp36wjm7bizks5htevqjcppd66dcya.py
# Topologically Sorted Source Nodes: [sub, add], Original ATen: [aten.sub, aten.add]
# Source node to ATen node mapping:
# add => add
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %view_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {})
triton_poi_fused_add_sub_0 = async_compile.triton('triton_poi_fused_add_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr1 + (x2), xmask)
tmp9 = tl.load(in_ptr3 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
tl.store(in_out_ptr1 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf3)
del primals_1
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [sub, add], Original ATen: [aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_sub_0.run(buf2, buf5, primals_2, buf1, primals_5, buf4, 256, grid=grid(256), stream=stream0)
del buf1
del buf4
del primals_2
del primals_5
return (buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr1 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tl.store(in_out_ptr0 + x2, tmp6, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf3)
del primals_1
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
get_raw_stream(0)
triton_poi_fused_add_sub_0[grid(256)](buf2, buf5, primals_2, buf1,
primals_5, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del buf4
del primals_2
del primals_5
return buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0)
class ComplexLinearNew(nn.Module):
def __init__(self, in_features, out_features):
super(ComplexLinearNew, self).__init__()
self.fc_r = nn.Linear(in_features, out_features)
self.fc_i = nn.Linear(in_features, out_features)
def forward(self, input_0, input_1):
primals_1 = self.fc_r.weight
primals_2 = self.fc_r.bias
primals_4 = self.fc_i.weight
primals_5 = self.fc_i.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
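def _check_complex_linear_new():
    # Hedged equivalence sketch (assumes a CUDA device): the two outputs
    # realise complex multiplication with real and imaginary weights,
    # out_r = fc_r(x_r) - fc_i(x_i) and out_i = fc_r(x_i) + fc_i(x_r).
    m = ComplexLinearNew(4, 4).cuda()
    xr = torch.rand(4, 4, 4, 4, device='cuda')
    xi = torch.rand(4, 4, 4, 4, device='cuda')
    out_r, out_i = m(xr, xi)
    assert torch.allclose(out_r, m.fc_r(xr) - m.fc_i(xi), atol=1e-5)
    assert torch.allclose(out_i, m.fc_r(xi) + m.fc_i(xr), atol=1e-5)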
| muqiaoy/dl_signal | ComplexLinear | false | 16,123 | [
"MIT"
] | 54 | 3a30d14982016644bfc96a7d1ca0109b441f17fd | https://github.com/muqiaoy/dl_signal/tree/3a30d14982016644bfc96a7d1ca0109b441f17fd |
NetVLAD | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from sklearn.neighbors import NearestNeighbors
try:
    import faiss  # needed by init_params when vladv2 and use_faiss are set
except ImportError:  # keep the sklearn fallback usable without faiss
    faiss = None
class NetVLAD(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, num_clusters=64, dim=128, normalize_input=True,
vladv2=False, use_faiss=True):
"""
Args:
num_clusters : int
The number of clusters
dim : int
Dimension of descriptors
normalize_input : bool
If true, descriptor-wise L2 normalization is applied to input.
            vladv2 : bool
                If true, use vladv2 otherwise use vladv1
            use_faiss : bool
                If true, use faiss rather than scikit-learn for the
                nearest-neighbour search during vladv2 initialization
        """
super().__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = 0
self.vladv2 = vladv2
self.normalize_input = normalize_input
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=
vladv2)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
self.use_faiss = use_faiss
def init_params(self, clsts, traindescs):
if not self.vladv2:
clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
dots = np.dot(clstsAssign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :]
self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha *
clstsAssign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
else:
if not self.use_faiss:
knn = NearestNeighbors(n_jobs=-1)
knn.fit(traindescs)
del traindescs
ds_sq = np.square(knn.kneighbors(clsts, 2)[1])
del knn
else:
index = faiss.IndexFlatL2(traindescs.shape[1])
index.add(traindescs)
del traindescs
ds_sq = np.square(index.search(clsts, 2)[1])
del index
self.alpha = (-np.log(0.01) / np.mean(ds_sq[:, 1] - ds_sq[:, 0])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
del clsts, ds_sq
self.conv.weight = nn.Parameter((2.0 * self.alpha * self.
centroids).unsqueeze(-1).unsqueeze(-1))
self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm
(dim=1))
def forward(self, x):
N, C = x.shape[:2]
if self.normalize_input:
x = F.normalize(x, p=2, dim=1)
soft_assign = self.conv(x).view(N, self.num_clusters, -1)
soft_assign = F.softmax(soft_assign, dim=1)
x_flatten = x.view(N, C, -1)
vlad = torch.zeros([N, self.num_clusters, C], dtype=x.dtype, layout
=x.layout, device=x.device)
for C in range(self.num_clusters):
residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3
) - self.centroids[C:C + 1, :].expand(x_flatten.size(-1), -
1, -1).permute(1, 2, 0).unsqueeze(0)
residual *= soft_assign[:, C:C + 1, :].unsqueeze(2)
vlad[:, C:C + 1, :] = residual.sum(dim=-1)
vlad = F.normalize(vlad, p=2, dim=2)
vlad = vlad.view(x.size(0), -1)
vlad = F.normalize(vlad, p=2, dim=1)
return vlad
def get_inputs():
return [torch.rand([4, 128, 64, 64])]
def get_init_inputs():
return [[], {}]
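def _demo_netvlad():
    # Illustrative sketch, not from the upstream repo: NetVLAD aggregates
    # an (N, C, H, W) map of local descriptors into an L2-normalised
    # (N, num_clusters * C) global descriptor.
    m = NetVLAD(num_clusters=8, dim=16)
    x = torch.rand(2, 16, 4, 4)
    v = m(x)
    assert v.shape == (2, 8 * 16)
    assert torch.allclose(v.norm(dim=1), torch.ones(2), atol=1e-5)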
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
from sklearn.neighbors import NearestNeighbors
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x1 = xindex // 4096
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 524288 * x1), rmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, None)
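# Kernel 1: pointwise normalization plus residuals. Divides each value by its
# pixel's L2 norm (clamped at 1e-12), then subtracts centroid rows 1-63 from
# the normalized value, emitting the normalized input and 63 residual buffers
# (the cluster-0 residual is formed inline in kernel 3 below).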
@triton.jit
def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13,
out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19,
out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25,
out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31,
out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37,
out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43,
out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49,
out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55,
out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61,
out_ptr62, out_ptr63, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 524288
x1 = xindex // 4096 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (1792 + x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last')
tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last')
tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last')
tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last')
tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last')
tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last')
tmp84 = tl.load(in_ptr2 + (5120 + x1), None, eviction_policy='evict_last')
tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last')
tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last')
tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last')
tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last')
tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last')
tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last')
tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last')
tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last')
tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last')
tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last')
tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last')
tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last')
tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last')
tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last')
tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last')
tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last')
tmp126 = tl.load(in_ptr2 + (7808 + x1), None, eviction_policy='evict_last')
tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last')
tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last')
tmp2 = libdevice.sqrt(tmp1)
tmp3 = 1e-12
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 / tmp4
tmp7 = tmp5 - tmp6
tmp9 = tmp5 - tmp8
tmp11 = tmp5 - tmp10
tmp13 = tmp5 - tmp12
tmp15 = tmp5 - tmp14
tmp17 = tmp5 - tmp16
tmp19 = tmp5 - tmp18
tmp21 = tmp5 - tmp20
tmp23 = tmp5 - tmp22
tmp25 = tmp5 - tmp24
tmp27 = tmp5 - tmp26
tmp29 = tmp5 - tmp28
tmp31 = tmp5 - tmp30
tmp33 = tmp5 - tmp32
tmp35 = tmp5 - tmp34
tmp37 = tmp5 - tmp36
tmp39 = tmp5 - tmp38
tmp41 = tmp5 - tmp40
tmp43 = tmp5 - tmp42
tmp45 = tmp5 - tmp44
tmp47 = tmp5 - tmp46
tmp49 = tmp5 - tmp48
tmp51 = tmp5 - tmp50
tmp53 = tmp5 - tmp52
tmp55 = tmp5 - tmp54
tmp57 = tmp5 - tmp56
tmp59 = tmp5 - tmp58
tmp61 = tmp5 - tmp60
tmp63 = tmp5 - tmp62
tmp65 = tmp5 - tmp64
tmp67 = tmp5 - tmp66
tmp69 = tmp5 - tmp68
tmp71 = tmp5 - tmp70
tmp73 = tmp5 - tmp72
tmp75 = tmp5 - tmp74
tmp77 = tmp5 - tmp76
tmp79 = tmp5 - tmp78
tmp81 = tmp5 - tmp80
tmp83 = tmp5 - tmp82
tmp85 = tmp5 - tmp84
tmp87 = tmp5 - tmp86
tmp89 = tmp5 - tmp88
tmp91 = tmp5 - tmp90
tmp93 = tmp5 - tmp92
tmp95 = tmp5 - tmp94
tmp97 = tmp5 - tmp96
tmp99 = tmp5 - tmp98
tmp101 = tmp5 - tmp100
tmp103 = tmp5 - tmp102
tmp105 = tmp5 - tmp104
tmp107 = tmp5 - tmp106
tmp109 = tmp5 - tmp108
tmp111 = tmp5 - tmp110
tmp113 = tmp5 - tmp112
tmp115 = tmp5 - tmp114
tmp117 = tmp5 - tmp116
tmp119 = tmp5 - tmp118
tmp121 = tmp5 - tmp120
tmp123 = tmp5 - tmp122
tmp125 = tmp5 - tmp124
tmp127 = tmp5 - tmp126
tmp129 = tmp5 - tmp128
tmp131 = tmp5 - tmp130
tl.store(out_ptr0 + x3, tmp5, None)
tl.store(out_ptr1 + x3, tmp7, None)
tl.store(out_ptr2 + x3, tmp9, None)
tl.store(out_ptr3 + x3, tmp11, None)
tl.store(out_ptr4 + x3, tmp13, None)
tl.store(out_ptr5 + x3, tmp15, None)
tl.store(out_ptr6 + x3, tmp17, None)
tl.store(out_ptr7 + x3, tmp19, None)
tl.store(out_ptr8 + x3, tmp21, None)
tl.store(out_ptr9 + x3, tmp23, None)
tl.store(out_ptr10 + x3, tmp25, None)
tl.store(out_ptr11 + x3, tmp27, None)
tl.store(out_ptr12 + x3, tmp29, None)
tl.store(out_ptr13 + x3, tmp31, None)
tl.store(out_ptr14 + x3, tmp33, None)
tl.store(out_ptr15 + x3, tmp35, None)
tl.store(out_ptr16 + x3, tmp37, None)
tl.store(out_ptr17 + x3, tmp39, None)
tl.store(out_ptr18 + x3, tmp41, None)
tl.store(out_ptr19 + x3, tmp43, None)
tl.store(out_ptr20 + x3, tmp45, None)
tl.store(out_ptr21 + x3, tmp47, None)
tl.store(out_ptr22 + x3, tmp49, None)
tl.store(out_ptr23 + x3, tmp51, None)
tl.store(out_ptr24 + x3, tmp53, None)
tl.store(out_ptr25 + x3, tmp55, None)
tl.store(out_ptr26 + x3, tmp57, None)
tl.store(out_ptr27 + x3, tmp59, None)
tl.store(out_ptr28 + x3, tmp61, None)
tl.store(out_ptr29 + x3, tmp63, None)
tl.store(out_ptr30 + x3, tmp65, None)
tl.store(out_ptr31 + x3, tmp67, None)
tl.store(out_ptr32 + x3, tmp69, None)
tl.store(out_ptr33 + x3, tmp71, None)
tl.store(out_ptr34 + x3, tmp73, None)
tl.store(out_ptr35 + x3, tmp75, None)
tl.store(out_ptr36 + x3, tmp77, None)
tl.store(out_ptr37 + x3, tmp79, None)
tl.store(out_ptr38 + x3, tmp81, None)
tl.store(out_ptr39 + x3, tmp83, None)
tl.store(out_ptr40 + x3, tmp85, None)
tl.store(out_ptr41 + x3, tmp87, None)
tl.store(out_ptr42 + x3, tmp89, None)
tl.store(out_ptr43 + x3, tmp91, None)
tl.store(out_ptr44 + x3, tmp93, None)
tl.store(out_ptr45 + x3, tmp95, None)
tl.store(out_ptr46 + x3, tmp97, None)
tl.store(out_ptr47 + x3, tmp99, None)
tl.store(out_ptr48 + x3, tmp101, None)
tl.store(out_ptr49 + x3, tmp103, None)
tl.store(out_ptr50 + x3, tmp105, None)
tl.store(out_ptr51 + x3, tmp107, None)
tl.store(out_ptr52 + x3, tmp109, None)
tl.store(out_ptr53 + x3, tmp111, None)
tl.store(out_ptr54 + x3, tmp113, None)
tl.store(out_ptr55 + x3, tmp115, None)
tl.store(out_ptr56 + x3, tmp117, None)
tl.store(out_ptr57 + x3, tmp119, None)
tl.store(out_ptr58 + x3, tmp121, None)
tl.store(out_ptr59 + x3, tmp123, None)
tl.store(out_ptr60 + x3, tmp125, None)
tl.store(out_ptr61 + x3, tmp127, None)
tl.store(out_ptr62 + x3, tmp129, None)
tl.store(out_ptr63 + x3, tmp131, None)
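# Kernel 2: per-pixel softmax statistics over the 64 cluster logits. Stores
# the row max and the sum of exp(logit - max) for reuse in the next kernels.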
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4096
x1 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 262144 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, None)
tl.store(out_ptr1 + x3, tmp8, None)
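# Kernel 3: spatial reduction for clusters 0-28. Rebuilds each softmax weight
# from the stored max/sum, multiplies it with the matching residual, and sums
# over the 4096 positions, yielding the unnormalized VLAD rows for those
# clusters (cluster 0's residual is computed inline from the normalized input
# and centroid row 0).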
@triton.jit
def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17,
in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24,
in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31,
in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12,
out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18,
out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24,
out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 128
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
x1 = xindex // 128
_tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp3 = tl.load(in_ptr2 + (r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr4 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp14 = tl.load(in_ptr2 + (4096 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp23 = tl.load(in_ptr2 + (8192 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp32 = tl.load(in_ptr2 + (12288 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp40 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp41 = tl.load(in_ptr2 + (16384 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp49 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp50 = tl.load(in_ptr2 + (20480 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp59 = tl.load(in_ptr2 + (24576 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp67 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp68 = tl.load(in_ptr2 + (28672 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp76 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp77 = tl.load(in_ptr2 + (32768 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp85 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp86 = tl.load(in_ptr2 + (36864 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp94 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp95 = tl.load(in_ptr2 + (40960 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp103 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp104 = tl.load(in_ptr2 + (45056 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp112 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp113 = tl.load(in_ptr2 + (49152 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp121 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp122 = tl.load(in_ptr2 + (53248 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp130 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp131 = tl.load(in_ptr2 + (57344 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp139 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp140 = tl.load(in_ptr2 + (61440 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp148 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp149 = tl.load(in_ptr2 + (65536 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp157 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp158 = tl.load(in_ptr2 + (69632 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp166 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp167 = tl.load(in_ptr2 + (73728 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp175 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp176 = tl.load(in_ptr2 + (77824 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp184 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp185 = tl.load(in_ptr2 + (81920 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp193 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp194 = tl.load(in_ptr2 + (86016 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp202 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp203 = tl.load(in_ptr2 + (90112 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp211 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp212 = tl.load(in_ptr2 + (94208 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp220 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp221 = tl.load(in_ptr2 + (98304 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp229 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp230 = tl.load(in_ptr2 + (102400 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp238 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp239 = tl.load(in_ptr2 + (106496 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp247 = tl.load(in_ptr31 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp248 = tl.load(in_ptr2 + (110592 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp256 = tl.load(in_ptr32 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp257 = tl.load(in_ptr2 + (114688 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp2 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = _tmp11 + tmp10
_tmp11 = tl.where(rmask & xmask, tmp12, _tmp11)
tmp15 = tmp14 - tmp4
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 / tmp7
tmp18 = tmp13 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = _tmp20 + tmp19
_tmp20 = tl.where(rmask & xmask, tmp21, _tmp20)
tmp24 = tmp23 - tmp4
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp25 / tmp7
tmp27 = tmp22 * tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = _tmp29 + tmp28
_tmp29 = tl.where(rmask & xmask, tmp30, _tmp29)
tmp33 = tmp32 - tmp4
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp34 / tmp7
tmp36 = tmp31 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = _tmp38 + tmp37
_tmp38 = tl.where(rmask & xmask, tmp39, _tmp38)
tmp42 = tmp41 - tmp4
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp43 / tmp7
tmp45 = tmp40 * tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp48 = _tmp47 + tmp46
_tmp47 = tl.where(rmask & xmask, tmp48, _tmp47)
tmp51 = tmp50 - tmp4
tmp52 = tl_math.exp(tmp51)
tmp53 = tmp52 / tmp7
tmp54 = tmp49 * tmp53
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp57 = _tmp56 + tmp55
_tmp56 = tl.where(rmask & xmask, tmp57, _tmp56)
tmp60 = tmp59 - tmp4
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp61 / tmp7
tmp63 = tmp58 * tmp62
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp66 = _tmp65 + tmp64
_tmp65 = tl.where(rmask & xmask, tmp66, _tmp65)
tmp69 = tmp68 - tmp4
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp70 / tmp7
tmp72 = tmp67 * tmp71
tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK])
tmp75 = _tmp74 + tmp73
_tmp74 = tl.where(rmask & xmask, tmp75, _tmp74)
tmp78 = tmp77 - tmp4
tmp79 = tl_math.exp(tmp78)
tmp80 = tmp79 / tmp7
tmp81 = tmp76 * tmp80
tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK])
tmp84 = _tmp83 + tmp82
_tmp83 = tl.where(rmask & xmask, tmp84, _tmp83)
tmp87 = tmp86 - tmp4
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp88 / tmp7
tmp90 = tmp85 * tmp89
tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK])
tmp93 = _tmp92 + tmp91
_tmp92 = tl.where(rmask & xmask, tmp93, _tmp92)
tmp96 = tmp95 - tmp4
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp97 / tmp7
tmp99 = tmp94 * tmp98
tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK])
tmp102 = _tmp101 + tmp100
_tmp101 = tl.where(rmask & xmask, tmp102, _tmp101)
tmp105 = tmp104 - tmp4
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp106 / tmp7
tmp108 = tmp103 * tmp107
tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK])
tmp111 = _tmp110 + tmp109
_tmp110 = tl.where(rmask & xmask, tmp111, _tmp110)
tmp114 = tmp113 - tmp4
tmp115 = tl_math.exp(tmp114)
tmp116 = tmp115 / tmp7
tmp117 = tmp112 * tmp116
tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK])
tmp120 = _tmp119 + tmp118
_tmp119 = tl.where(rmask & xmask, tmp120, _tmp119)
tmp123 = tmp122 - tmp4
tmp124 = tl_math.exp(tmp123)
tmp125 = tmp124 / tmp7
tmp126 = tmp121 * tmp125
tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK])
tmp129 = _tmp128 + tmp127
_tmp128 = tl.where(rmask & xmask, tmp129, _tmp128)
tmp132 = tmp131 - tmp4
tmp133 = tl_math.exp(tmp132)
tmp134 = tmp133 / tmp7
tmp135 = tmp130 * tmp134
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp138 = _tmp137 + tmp136
_tmp137 = tl.where(rmask & xmask, tmp138, _tmp137)
tmp141 = tmp140 - tmp4
tmp142 = tl_math.exp(tmp141)
tmp143 = tmp142 / tmp7
tmp144 = tmp139 * tmp143
tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK])
tmp147 = _tmp146 + tmp145
_tmp146 = tl.where(rmask & xmask, tmp147, _tmp146)
tmp150 = tmp149 - tmp4
tmp151 = tl_math.exp(tmp150)
tmp152 = tmp151 / tmp7
tmp153 = tmp148 * tmp152
tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK])
tmp156 = _tmp155 + tmp154
_tmp155 = tl.where(rmask & xmask, tmp156, _tmp155)
tmp159 = tmp158 - tmp4
tmp160 = tl_math.exp(tmp159)
tmp161 = tmp160 / tmp7
tmp162 = tmp157 * tmp161
tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK])
tmp165 = _tmp164 + tmp163
_tmp164 = tl.where(rmask & xmask, tmp165, _tmp164)
tmp168 = tmp167 - tmp4
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp169 / tmp7
tmp171 = tmp166 * tmp170
tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK])
tmp174 = _tmp173 + tmp172
_tmp173 = tl.where(rmask & xmask, tmp174, _tmp173)
tmp177 = tmp176 - tmp4
tmp178 = tl_math.exp(tmp177)
tmp179 = tmp178 / tmp7
tmp180 = tmp175 * tmp179
tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK])
tmp183 = _tmp182 + tmp181
_tmp182 = tl.where(rmask & xmask, tmp183, _tmp182)
tmp186 = tmp185 - tmp4
tmp187 = tl_math.exp(tmp186)
tmp188 = tmp187 / tmp7
tmp189 = tmp184 * tmp188
tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK])
tmp192 = _tmp191 + tmp190
_tmp191 = tl.where(rmask & xmask, tmp192, _tmp191)
tmp195 = tmp194 - tmp4
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp196 / tmp7
tmp198 = tmp193 * tmp197
tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK])
tmp201 = _tmp200 + tmp199
_tmp200 = tl.where(rmask & xmask, tmp201, _tmp200)
tmp204 = tmp203 - tmp4
tmp205 = tl_math.exp(tmp204)
tmp206 = tmp205 / tmp7
tmp207 = tmp202 * tmp206
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp210 = _tmp209 + tmp208
_tmp209 = tl.where(rmask & xmask, tmp210, _tmp209)
tmp213 = tmp212 - tmp4
tmp214 = tl_math.exp(tmp213)
tmp215 = tmp214 / tmp7
tmp216 = tmp211 * tmp215
tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK])
tmp219 = _tmp218 + tmp217
_tmp218 = tl.where(rmask & xmask, tmp219, _tmp218)
tmp222 = tmp221 - tmp4
tmp223 = tl_math.exp(tmp222)
tmp224 = tmp223 / tmp7
tmp225 = tmp220 * tmp224
tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK])
tmp228 = _tmp227 + tmp226
_tmp227 = tl.where(rmask & xmask, tmp228, _tmp227)
tmp231 = tmp230 - tmp4
tmp232 = tl_math.exp(tmp231)
tmp233 = tmp232 / tmp7
tmp234 = tmp229 * tmp233
tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK])
tmp237 = _tmp236 + tmp235
_tmp236 = tl.where(rmask & xmask, tmp237, _tmp236)
tmp240 = tmp239 - tmp4
tmp241 = tl_math.exp(tmp240)
tmp242 = tmp241 / tmp7
tmp243 = tmp238 * tmp242
tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK])
tmp246 = _tmp245 + tmp244
_tmp245 = tl.where(rmask & xmask, tmp246, _tmp245)
tmp249 = tmp248 - tmp4
tmp250 = tl_math.exp(tmp249)
tmp251 = tmp250 / tmp7
tmp252 = tmp247 * tmp251
tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK])
tmp255 = _tmp254 + tmp253
_tmp254 = tl.where(rmask & xmask, tmp255, _tmp254)
tmp258 = tmp257 - tmp4
tmp259 = tl_math.exp(tmp258)
tmp260 = tmp259 / tmp7
tmp261 = tmp256 * tmp260
tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK])
tmp264 = _tmp263 + tmp262
_tmp263 = tl.where(rmask & xmask, tmp264, _tmp263)
tmp11 = tl.sum(_tmp11, 1)[:, None]
tl.store(out_ptr0 + x3, tmp11, xmask)
tmp20 = tl.sum(_tmp20, 1)[:, None]
tl.store(out_ptr1 + x3, tmp20, xmask)
tmp29 = tl.sum(_tmp29, 1)[:, None]
tl.store(out_ptr2 + x3, tmp29, xmask)
tmp38 = tl.sum(_tmp38, 1)[:, None]
tl.store(out_ptr3 + x3, tmp38, xmask)
tmp47 = tl.sum(_tmp47, 1)[:, None]
tl.store(out_ptr4 + x3, tmp47, xmask)
tmp56 = tl.sum(_tmp56, 1)[:, None]
tl.store(out_ptr5 + x3, tmp56, xmask)
tmp65 = tl.sum(_tmp65, 1)[:, None]
tl.store(out_ptr6 + x3, tmp65, xmask)
tmp74 = tl.sum(_tmp74, 1)[:, None]
tl.store(out_ptr7 + x3, tmp74, xmask)
tmp83 = tl.sum(_tmp83, 1)[:, None]
tl.store(out_ptr8 + x3, tmp83, xmask)
tmp92 = tl.sum(_tmp92, 1)[:, None]
tl.store(out_ptr9 + x3, tmp92, xmask)
tmp101 = tl.sum(_tmp101, 1)[:, None]
tl.store(out_ptr10 + x3, tmp101, xmask)
tmp110 = tl.sum(_tmp110, 1)[:, None]
tl.store(out_ptr11 + x3, tmp110, xmask)
tmp119 = tl.sum(_tmp119, 1)[:, None]
tl.store(out_ptr12 + x3, tmp119, xmask)
tmp128 = tl.sum(_tmp128, 1)[:, None]
tl.store(out_ptr13 + x3, tmp128, xmask)
tmp137 = tl.sum(_tmp137, 1)[:, None]
tl.store(out_ptr14 + x3, tmp137, xmask)
tmp146 = tl.sum(_tmp146, 1)[:, None]
tl.store(out_ptr15 + x3, tmp146, xmask)
tmp155 = tl.sum(_tmp155, 1)[:, None]
tl.store(out_ptr16 + x3, tmp155, xmask)
tmp164 = tl.sum(_tmp164, 1)[:, None]
tl.store(out_ptr17 + x3, tmp164, xmask)
tmp173 = tl.sum(_tmp173, 1)[:, None]
tl.store(out_ptr18 + x3, tmp173, xmask)
tmp182 = tl.sum(_tmp182, 1)[:, None]
tl.store(out_ptr19 + x3, tmp182, xmask)
tmp191 = tl.sum(_tmp191, 1)[:, None]
tl.store(out_ptr20 + x3, tmp191, xmask)
tmp200 = tl.sum(_tmp200, 1)[:, None]
tl.store(out_ptr21 + x3, tmp200, xmask)
tmp209 = tl.sum(_tmp209, 1)[:, None]
tl.store(out_ptr22 + x3, tmp209, xmask)
tmp218 = tl.sum(_tmp218, 1)[:, None]
tl.store(out_ptr23 + x3, tmp218, xmask)
tmp227 = tl.sum(_tmp227, 1)[:, None]
tl.store(out_ptr24 + x3, tmp227, xmask)
tmp236 = tl.sum(_tmp236, 1)[:, None]
tl.store(out_ptr25 + x3, tmp236, xmask)
tmp245 = tl.sum(_tmp245, 1)[:, None]
tl.store(out_ptr26 + x3, tmp245, xmask)
tmp254 = tl.sum(_tmp254, 1)[:, None]
tl.store(out_ptr27 + x3, tmp254, xmask)
tmp263 = tl.sum(_tmp263, 1)[:, None]
tl.store(out_ptr28 + x3, tmp263, xmask)
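# Kernel 4: the same weighted spatial reduction as kernel 3, covering
# clusters 29-56 (logit offsets 118784 through 229376 in steps of 4096).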
@triton.jit
def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18,
in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25,
in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14,
out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20,
out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26,
out_ptr27, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x1 = xindex // 128
_tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + (118784 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp12 = tl.load(in_ptr1 + (122880 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp21 = tl.load(in_ptr1 + (126976 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp30 = tl.load(in_ptr1 + (131072 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp39 = tl.load(in_ptr1 + (135168 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp48 = tl.load(in_ptr1 + (139264 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp57 = tl.load(in_ptr1 + (143360 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp66 = tl.load(in_ptr1 + (147456 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp75 = tl.load(in_ptr1 + (151552 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp83 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp84 = tl.load(in_ptr1 + (155648 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp92 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp93 = tl.load(in_ptr1 + (159744 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp101 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp102 = tl.load(in_ptr1 + (163840 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp110 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp111 = tl.load(in_ptr1 + (167936 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp119 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp120 = tl.load(in_ptr1 + (172032 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp128 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp129 = tl.load(in_ptr1 + (176128 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp137 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp138 = tl.load(in_ptr1 + (180224 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp146 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp147 = tl.load(in_ptr1 + (184320 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp155 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp156 = tl.load(in_ptr1 + (188416 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp164 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp165 = tl.load(in_ptr1 + (192512 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp173 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp174 = tl.load(in_ptr1 + (196608 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp182 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp183 = tl.load(in_ptr1 + (200704 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp191 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp192 = tl.load(in_ptr1 + (204800 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp200 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp201 = tl.load(in_ptr1 + (208896 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp209 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp210 = tl.load(in_ptr1 + (212992 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp218 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp219 = tl.load(in_ptr1 + (217088 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp227 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp228 = tl.load(in_ptr1 + (221184 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp236 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp237 = tl.load(in_ptr1 + (225280 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp245 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp246 = tl.load(in_ptr1 + (229376 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = _tmp9 + tmp8
_tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
tmp13 = tmp12 - tmp2
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tmp22 = tmp21 - tmp2
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp25 = tmp20 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = _tmp27 + tmp26
_tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
tmp31 = tmp30 - tmp2
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp34 = tmp29 * tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = _tmp36 + tmp35
_tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
tmp40 = tmp39 - tmp2
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp41 / tmp5
tmp43 = tmp38 * tmp42
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = _tmp45 + tmp44
_tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
tmp49 = tmp48 - tmp2
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp5
tmp52 = tmp47 * tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = _tmp54 + tmp53
_tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
tmp58 = tmp57 - tmp2
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp59 / tmp5
tmp61 = tmp56 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = _tmp63 + tmp62
_tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
tmp67 = tmp66 - tmp2
tmp68 = tl_math.exp(tmp67)
tmp69 = tmp68 / tmp5
tmp70 = tmp65 * tmp69
tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK])
tmp73 = _tmp72 + tmp71
_tmp72 = tl.where(rmask & xmask, tmp73, _tmp72)
tmp76 = tmp75 - tmp2
tmp77 = tl_math.exp(tmp76)
tmp78 = tmp77 / tmp5
tmp79 = tmp74 * tmp78
tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK])
tmp82 = _tmp81 + tmp80
_tmp81 = tl.where(rmask & xmask, tmp82, _tmp81)
tmp85 = tmp84 - tmp2
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp86 / tmp5
tmp88 = tmp83 * tmp87
tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK])
tmp91 = _tmp90 + tmp89
_tmp90 = tl.where(rmask & xmask, tmp91, _tmp90)
tmp94 = tmp93 - tmp2
tmp95 = tl_math.exp(tmp94)
tmp96 = tmp95 / tmp5
tmp97 = tmp92 * tmp96
tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK])
tmp100 = _tmp99 + tmp98
_tmp99 = tl.where(rmask & xmask, tmp100, _tmp99)
tmp103 = tmp102 - tmp2
tmp104 = tl_math.exp(tmp103)
tmp105 = tmp104 / tmp5
tmp106 = tmp101 * tmp105
tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK])
tmp109 = _tmp108 + tmp107
_tmp108 = tl.where(rmask & xmask, tmp109, _tmp108)
tmp112 = tmp111 - tmp2
tmp113 = tl_math.exp(tmp112)
tmp114 = tmp113 / tmp5
tmp115 = tmp110 * tmp114
tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK])
tmp118 = _tmp117 + tmp116
_tmp117 = tl.where(rmask & xmask, tmp118, _tmp117)
tmp121 = tmp120 - tmp2
tmp122 = tl_math.exp(tmp121)
tmp123 = tmp122 / tmp5
tmp124 = tmp119 * tmp123
tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK])
tmp127 = _tmp126 + tmp125
_tmp126 = tl.where(rmask & xmask, tmp127, _tmp126)
tmp130 = tmp129 - tmp2
tmp131 = tl_math.exp(tmp130)
tmp132 = tmp131 / tmp5
tmp133 = tmp128 * tmp132
tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
tmp136 = _tmp135 + tmp134
_tmp135 = tl.where(rmask & xmask, tmp136, _tmp135)
tmp139 = tmp138 - tmp2
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp140 / tmp5
tmp142 = tmp137 * tmp141
tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK])
tmp145 = _tmp144 + tmp143
_tmp144 = tl.where(rmask & xmask, tmp145, _tmp144)
tmp148 = tmp147 - tmp2
tmp149 = tl_math.exp(tmp148)
tmp150 = tmp149 / tmp5
tmp151 = tmp146 * tmp150
tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK])
tmp154 = _tmp153 + tmp152
_tmp153 = tl.where(rmask & xmask, tmp154, _tmp153)
tmp157 = tmp156 - tmp2
tmp158 = tl_math.exp(tmp157)
tmp159 = tmp158 / tmp5
tmp160 = tmp155 * tmp159
tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK])
tmp163 = _tmp162 + tmp161
_tmp162 = tl.where(rmask & xmask, tmp163, _tmp162)
tmp166 = tmp165 - tmp2
tmp167 = tl_math.exp(tmp166)
tmp168 = tmp167 / tmp5
tmp169 = tmp164 * tmp168
tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK])
tmp172 = _tmp171 + tmp170
_tmp171 = tl.where(rmask & xmask, tmp172, _tmp171)
tmp175 = tmp174 - tmp2
tmp176 = tl_math.exp(tmp175)
tmp177 = tmp176 / tmp5
tmp178 = tmp173 * tmp177
tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK])
tmp181 = _tmp180 + tmp179
_tmp180 = tl.where(rmask & xmask, tmp181, _tmp180)
tmp184 = tmp183 - tmp2
tmp185 = tl_math.exp(tmp184)
tmp186 = tmp185 / tmp5
tmp187 = tmp182 * tmp186
tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
tmp190 = _tmp189 + tmp188
_tmp189 = tl.where(rmask & xmask, tmp190, _tmp189)
tmp193 = tmp192 - tmp2
tmp194 = tl_math.exp(tmp193)
tmp195 = tmp194 / tmp5
tmp196 = tmp191 * tmp195
tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK])
tmp199 = _tmp198 + tmp197
_tmp198 = tl.where(rmask & xmask, tmp199, _tmp198)
tmp202 = tmp201 - tmp2
tmp203 = tl_math.exp(tmp202)
tmp204 = tmp203 / tmp5
tmp205 = tmp200 * tmp204
tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK])
tmp208 = _tmp207 + tmp206
_tmp207 = tl.where(rmask & xmask, tmp208, _tmp207)
tmp211 = tmp210 - tmp2
tmp212 = tl_math.exp(tmp211)
tmp213 = tmp212 / tmp5
tmp214 = tmp209 * tmp213
tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK])
tmp217 = _tmp216 + tmp215
_tmp216 = tl.where(rmask & xmask, tmp217, _tmp216)
tmp220 = tmp219 - tmp2
tmp221 = tl_math.exp(tmp220)
tmp222 = tmp221 / tmp5
tmp223 = tmp218 * tmp222
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp226 = _tmp225 + tmp224
_tmp225 = tl.where(rmask & xmask, tmp226, _tmp225)
tmp229 = tmp228 - tmp2
tmp230 = tl_math.exp(tmp229)
tmp231 = tmp230 / tmp5
tmp232 = tmp227 * tmp231
tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK])
tmp235 = _tmp234 + tmp233
_tmp234 = tl.where(rmask & xmask, tmp235, _tmp234)
tmp238 = tmp237 - tmp2
tmp239 = tl_math.exp(tmp238)
tmp240 = tmp239 / tmp5
tmp241 = tmp236 * tmp240
tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK])
tmp244 = _tmp243 + tmp242
_tmp243 = tl.where(rmask & xmask, tmp244, _tmp243)
tmp247 = tmp246 - tmp2
tmp248 = tl_math.exp(tmp247)
tmp249 = tmp248 / tmp5
tmp250 = tmp245 * tmp249
tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK])
tmp253 = _tmp252 + tmp251
_tmp252 = tl.where(rmask & xmask, tmp253, _tmp252)
tmp9 = tl.sum(_tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp9, xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tl.store(out_ptr1 + x3, tmp18, xmask)
tmp27 = tl.sum(_tmp27, 1)[:, None]
tl.store(out_ptr2 + x3, tmp27, xmask)
tmp36 = tl.sum(_tmp36, 1)[:, None]
tl.store(out_ptr3 + x3, tmp36, xmask)
tmp45 = tl.sum(_tmp45, 1)[:, None]
tl.store(out_ptr4 + x3, tmp45, xmask)
tmp54 = tl.sum(_tmp54, 1)[:, None]
tl.store(out_ptr5 + x3, tmp54, xmask)
tmp63 = tl.sum(_tmp63, 1)[:, None]
tl.store(out_ptr6 + x3, tmp63, xmask)
tmp72 = tl.sum(_tmp72, 1)[:, None]
tl.store(out_ptr7 + x3, tmp72, xmask)
tmp81 = tl.sum(_tmp81, 1)[:, None]
tl.store(out_ptr8 + x3, tmp81, xmask)
tmp90 = tl.sum(_tmp90, 1)[:, None]
tl.store(out_ptr9 + x3, tmp90, xmask)
tmp99 = tl.sum(_tmp99, 1)[:, None]
tl.store(out_ptr10 + x3, tmp99, xmask)
tmp108 = tl.sum(_tmp108, 1)[:, None]
tl.store(out_ptr11 + x3, tmp108, xmask)
tmp117 = tl.sum(_tmp117, 1)[:, None]
tl.store(out_ptr12 + x3, tmp117, xmask)
tmp126 = tl.sum(_tmp126, 1)[:, None]
tl.store(out_ptr13 + x3, tmp126, xmask)
tmp135 = tl.sum(_tmp135, 1)[:, None]
tl.store(out_ptr14 + x3, tmp135, xmask)
tmp144 = tl.sum(_tmp144, 1)[:, None]
tl.store(out_ptr15 + x3, tmp144, xmask)
tmp153 = tl.sum(_tmp153, 1)[:, None]
tl.store(out_ptr16 + x3, tmp153, xmask)
tmp162 = tl.sum(_tmp162, 1)[:, None]
tl.store(out_ptr17 + x3, tmp162, xmask)
tmp171 = tl.sum(_tmp171, 1)[:, None]
tl.store(out_ptr18 + x3, tmp171, xmask)
tmp180 = tl.sum(_tmp180, 1)[:, None]
tl.store(out_ptr19 + x3, tmp180, xmask)
tmp189 = tl.sum(_tmp189, 1)[:, None]
tl.store(out_ptr20 + x3, tmp189, xmask)
tmp198 = tl.sum(_tmp198, 1)[:, None]
tl.store(out_ptr21 + x3, tmp198, xmask)
tmp207 = tl.sum(_tmp207, 1)[:, None]
tl.store(out_ptr22 + x3, tmp207, xmask)
tmp216 = tl.sum(_tmp216, 1)[:, None]
tl.store(out_ptr23 + x3, tmp216, xmask)
tmp225 = tl.sum(_tmp225, 1)[:, None]
tl.store(out_ptr24 + x3, tmp225, xmask)
tmp234 = tl.sum(_tmp234, 1)[:, None]
tl.store(out_ptr25 + x3, tmp234, xmask)
tmp243 = tl.sum(_tmp243, 1)[:, None]
tl.store(out_ptr26 + x3, tmp243, xmask)
tmp252 = tl.sum(_tmp252, 1)[:, None]
tl.store(out_ptr27 + x3, tmp252, xmask)
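# Kernel 5: the same weighted spatial reduction for the remaining clusters
# 57-63.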
@triton.jit
def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x1 = xindex // 128
_tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + (233472 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp12 = tl.load(in_ptr1 + (237568 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp21 = tl.load(in_ptr1 + (241664 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp30 = tl.load(in_ptr1 + (245760 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp39 = tl.load(in_ptr1 + (249856 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp48 = tl.load(in_ptr1 + (253952 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp57 = tl.load(in_ptr1 + (258048 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = _tmp9 + tmp8
_tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
tmp13 = tmp12 - tmp2
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tmp22 = tmp21 - tmp2
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp25 = tmp20 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = _tmp27 + tmp26
_tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
tmp31 = tmp30 - tmp2
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp34 = tmp29 * tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = _tmp36 + tmp35
_tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
tmp40 = tmp39 - tmp2
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp41 / tmp5
tmp43 = tmp38 * tmp42
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = _tmp45 + tmp44
_tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
tmp49 = tmp48 - tmp2
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp5
tmp52 = tmp47 * tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = _tmp54 + tmp53
_tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
tmp58 = tmp57 - tmp2
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp59 / tmp5
tmp61 = tmp56 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = _tmp63 + tmp62
_tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
tmp9 = tl.sum(_tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp9, xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tl.store(out_ptr1 + x3, tmp18, xmask)
tmp27 = tl.sum(_tmp27, 1)[:, None]
tl.store(out_ptr2 + x3, tmp27, xmask)
tmp36 = tl.sum(_tmp36, 1)[:, None]
tl.store(out_ptr3 + x3, tmp36, xmask)
tmp45 = tl.sum(_tmp45, 1)[:, None]
tl.store(out_ptr4 + x3, tmp45, xmask)
tmp54 = tl.sum(_tmp54, 1)[:, None]
tl.store(out_ptr5 + x3, tmp54, xmask)
tmp63 = tl.sum(_tmp63, 1)[:, None]
tl.store(out_ptr6 + x3, tmp63, xmask)
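# Kernel 6: gathers the 64 per-cluster sums into one (N, num_clusters, dim)
# tensor; the cascade of index-range selects below mirrors the Python loop's
# slice assignments into the zero-initialized vlad tensor. Per its fused
# name, it also produces the per-cluster vector norms consumed by the
# intra-normalization step.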
@triton.jit
def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12,
in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19,
in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26,
in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33,
in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40,
in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47,
in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54,
in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61,
in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex % 64
r2 = rindex
x1 = xindex // 64
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1, 1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (r2 + 128 * x1), tmp5 & xmask, eviction_policy
='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 3, tl.int64)
tmp8 = tmp0 >= tmp7
tmp9 = tmp0 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tl.load(in_ptr1 + (r2 + 128 * x1), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.full([1, 1], 2, tl.int64)
tmp13 = tmp0 >= tmp12
tmp14 = tmp0 < tmp7
tmp15 = tmp13 & tmp14
tmp16 = tl.load(in_ptr2 + (r2 + 128 * x1), tmp15 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.full([1, 1], 1, tl.int64)
tmp18 = tmp0 >= tmp17
tmp19 = tmp0 < tmp12
tmp20 = tmp18 & tmp19
tmp21 = tl.load(in_ptr3 + (r2 + 128 * x1), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tmp0 < tmp17
tmp23 = tl.load(in_ptr4 + (r2 + 128 * x1), tmp22 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = 0.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tl.where(tmp20, tmp21, tmp25)
tmp27 = tl.where(tmp15, tmp16, tmp26)
tmp28 = tl.where(tmp10, tmp11, tmp27)
tmp29 = tl.where(tmp5, tmp6, tmp28)
tmp30 = tl.full([1, 1], 8, tl.int64)
tmp31 = tmp0 >= tmp30
tmp32 = tl.full([1, 1], 9, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr5 + (r2 + 128 * x1), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.full([1, 1], 7, tl.int64)
tmp37 = tmp0 >= tmp36
tmp38 = tmp0 < tmp30
tmp39 = tmp37 & tmp38
tmp40 = tl.load(in_ptr6 + (r2 + 128 * x1), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tl.full([1, 1], 6, tl.int64)
tmp42 = tmp0 >= tmp41
tmp43 = tmp0 < tmp36
tmp44 = tmp42 & tmp43
tmp45 = tl.load(in_ptr7 + (r2 + 128 * x1), tmp44 & xmask,
eviction_policy='evict_last', other=0.0)
tmp46 = tmp0 >= tmp3
tmp47 = tmp0 < tmp41
tmp48 = tmp46 & tmp47
tmp49 = tl.load(in_ptr8 + (r2 + 128 * x1), tmp48 & xmask,
eviction_policy='evict_last', other=0.0)
tmp50 = tl.where(tmp48, tmp49, tmp29)
tmp51 = tl.where(tmp44, tmp45, tmp50)
tmp52 = tl.where(tmp39, tmp40, tmp51)
tmp53 = tl.where(tmp34, tmp35, tmp52)
tmp54 = tl.full([1, 1], 12, tl.int64)
tmp55 = tmp0 >= tmp54
tmp56 = tl.full([1, 1], 13, tl.int64)
tmp57 = tmp0 < tmp56
tmp58 = tmp55 & tmp57
tmp59 = tl.load(in_ptr9 + (r2 + 128 * x1), tmp58 & xmask,
eviction_policy='evict_last', other=0.0)
tmp60 = tl.full([1, 1], 11, tl.int64)
tmp61 = tmp0 >= tmp60
tmp62 = tmp0 < tmp54
tmp63 = tmp61 & tmp62
tmp64 = tl.load(in_ptr10 + (r2 + 128 * x1), tmp63 & xmask,
eviction_policy='evict_last', other=0.0)
tmp65 = tl.full([1, 1], 10, tl.int64)
tmp66 = tmp0 >= tmp65
tmp67 = tmp0 < tmp60
tmp68 = tmp66 & tmp67
tmp69 = tl.load(in_ptr11 + (r2 + 128 * x1), tmp68 & xmask,
eviction_policy='evict_last', other=0.0)
tmp70 = tmp0 >= tmp32
tmp71 = tmp0 < tmp65
tmp72 = tmp70 & tmp71
tmp73 = tl.load(in_ptr12 + (r2 + 128 * x1), tmp72 & xmask,
eviction_policy='evict_last', other=0.0)
tmp74 = tl.where(tmp72, tmp73, tmp53)
tmp75 = tl.where(tmp68, tmp69, tmp74)
tmp76 = tl.where(tmp63, tmp64, tmp75)
tmp77 = tl.where(tmp58, tmp59, tmp76)
tmp78 = tl.full([1, 1], 16, tl.int64)
tmp79 = tmp0 >= tmp78
tmp80 = tl.full([1, 1], 17, tl.int64)
tmp81 = tmp0 < tmp80
tmp82 = tmp79 & tmp81
tmp83 = tl.load(in_ptr13 + (r2 + 128 * x1), tmp82 & xmask,
eviction_policy='evict_last', other=0.0)
tmp84 = tl.full([1, 1], 15, tl.int64)
tmp85 = tmp0 >= tmp84
tmp86 = tmp0 < tmp78
tmp87 = tmp85 & tmp86
tmp88 = tl.load(in_ptr14 + (r2 + 128 * x1), tmp87 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tl.full([1, 1], 14, tl.int64)
tmp90 = tmp0 >= tmp89
tmp91 = tmp0 < tmp84
tmp92 = tmp90 & tmp91
tmp93 = tl.load(in_ptr15 + (r2 + 128 * x1), tmp92 & xmask,
eviction_policy='evict_last', other=0.0)
tmp94 = tmp0 >= tmp56
tmp95 = tmp0 < tmp89
tmp96 = tmp94 & tmp95
tmp97 = tl.load(in_ptr16 + (r2 + 128 * x1), tmp96 & xmask,
eviction_policy='evict_last', other=0.0)
tmp98 = tl.where(tmp96, tmp97, tmp77)
tmp99 = tl.where(tmp92, tmp93, tmp98)
tmp100 = tl.where(tmp87, tmp88, tmp99)
tmp101 = tl.where(tmp82, tmp83, tmp100)
tmp102 = tl.full([1, 1], 20, tl.int64)
tmp103 = tmp0 >= tmp102
tmp104 = tl.full([1, 1], 21, tl.int64)
tmp105 = tmp0 < tmp104
tmp106 = tmp103 & tmp105
tmp107 = tl.load(in_ptr17 + (r2 + 128 * x1), tmp106 & xmask,
eviction_policy='evict_last', other=0.0)
tmp108 = tl.full([1, 1], 19, tl.int64)
tmp109 = tmp0 >= tmp108
tmp110 = tmp0 < tmp102
tmp111 = tmp109 & tmp110
tmp112 = tl.load(in_ptr18 + (r2 + 128 * x1), tmp111 & xmask,
eviction_policy='evict_last', other=0.0)
tmp113 = tl.full([1, 1], 18, tl.int64)
tmp114 = tmp0 >= tmp113
tmp115 = tmp0 < tmp108
tmp116 = tmp114 & tmp115
tmp117 = tl.load(in_ptr19 + (r2 + 128 * x1), tmp116 & xmask,
eviction_policy='evict_last', other=0.0)
tmp118 = tmp0 >= tmp80
tmp119 = tmp0 < tmp113
tmp120 = tmp118 & tmp119
tmp121 = tl.load(in_ptr20 + (r2 + 128 * x1), tmp120 & xmask,
eviction_policy='evict_last', other=0.0)
tmp122 = tl.where(tmp120, tmp121, tmp101)
tmp123 = tl.where(tmp116, tmp117, tmp122)
tmp124 = tl.where(tmp111, tmp112, tmp123)
tmp125 = tl.where(tmp106, tmp107, tmp124)
tmp126 = tl.full([1, 1], 24, tl.int64)
tmp127 = tmp0 >= tmp126
tmp128 = tl.full([1, 1], 25, tl.int64)
tmp129 = tmp0 < tmp128
tmp130 = tmp127 & tmp129
tmp131 = tl.load(in_ptr21 + (r2 + 128 * x1), tmp130 & xmask,
eviction_policy='evict_last', other=0.0)
tmp132 = tl.full([1, 1], 23, tl.int64)
tmp133 = tmp0 >= tmp132
tmp134 = tmp0 < tmp126
tmp135 = tmp133 & tmp134
tmp136 = tl.load(in_ptr22 + (r2 + 128 * x1), tmp135 & xmask,
eviction_policy='evict_last', other=0.0)
tmp137 = tl.full([1, 1], 22, tl.int64)
tmp138 = tmp0 >= tmp137
tmp139 = tmp0 < tmp132
tmp140 = tmp138 & tmp139
tmp141 = tl.load(in_ptr23 + (r2 + 128 * x1), tmp140 & xmask,
eviction_policy='evict_last', other=0.0)
tmp142 = tmp0 >= tmp104
tmp143 = tmp0 < tmp137
tmp144 = tmp142 & tmp143
tmp145 = tl.load(in_ptr24 + (r2 + 128 * x1), tmp144 & xmask,
eviction_policy='evict_last', other=0.0)
tmp146 = tl.where(tmp144, tmp145, tmp125)
tmp147 = tl.where(tmp140, tmp141, tmp146)
tmp148 = tl.where(tmp135, tmp136, tmp147)
tmp149 = tl.where(tmp130, tmp131, tmp148)
tmp150 = tl.full([1, 1], 28, tl.int64)
tmp151 = tmp0 >= tmp150
tmp152 = tl.full([1, 1], 29, tl.int64)
tmp153 = tmp0 < tmp152
tmp154 = tmp151 & tmp153
tmp155 = tl.load(in_ptr25 + (r2 + 128 * x1), tmp154 & xmask,
eviction_policy='evict_last', other=0.0)
tmp156 = tl.full([1, 1], 27, tl.int64)
tmp157 = tmp0 >= tmp156
tmp158 = tmp0 < tmp150
tmp159 = tmp157 & tmp158
tmp160 = tl.load(in_ptr26 + (r2 + 128 * x1), tmp159 & xmask,
eviction_policy='evict_last', other=0.0)
tmp161 = tl.full([1, 1], 26, tl.int64)
tmp162 = tmp0 >= tmp161
tmp163 = tmp0 < tmp156
tmp164 = tmp162 & tmp163
tmp165 = tl.load(in_ptr27 + (r2 + 128 * x1), tmp164 & xmask,
eviction_policy='evict_last', other=0.0)
tmp166 = tmp0 >= tmp128
tmp167 = tmp0 < tmp161
tmp168 = tmp166 & tmp167
tmp169 = tl.load(in_ptr28 + (r2 + 128 * x1), tmp168 & xmask,
eviction_policy='evict_last', other=0.0)
tmp170 = tl.where(tmp168, tmp169, tmp149)
tmp171 = tl.where(tmp164, tmp165, tmp170)
tmp172 = tl.where(tmp159, tmp160, tmp171)
tmp173 = tl.where(tmp154, tmp155, tmp172)
tmp174 = tl.full([1, 1], 32, tl.int64)
tmp175 = tmp0 >= tmp174
tmp176 = tl.full([1, 1], 33, tl.int64)
tmp177 = tmp0 < tmp176
tmp178 = tmp175 & tmp177
tmp179 = tl.load(in_ptr29 + (r2 + 128 * x1), tmp178 & xmask,
eviction_policy='evict_last', other=0.0)
tmp180 = tl.full([1, 1], 31, tl.int64)
tmp181 = tmp0 >= tmp180
tmp182 = tmp0 < tmp174
tmp183 = tmp181 & tmp182
tmp184 = tl.load(in_ptr30 + (r2 + 128 * x1), tmp183 & xmask,
eviction_policy='evict_last', other=0.0)
tmp185 = tl.full([1, 1], 30, tl.int64)
tmp186 = tmp0 >= tmp185
tmp187 = tmp0 < tmp180
tmp188 = tmp186 & tmp187
tmp189 = tl.load(in_ptr31 + (r2 + 128 * x1), tmp188 & xmask,
eviction_policy='evict_last', other=0.0)
tmp190 = tmp0 >= tmp152
tmp191 = tmp0 < tmp185
tmp192 = tmp190 & tmp191
tmp193 = tl.load(in_ptr32 + (r2 + 128 * x1), tmp192 & xmask,
eviction_policy='evict_last', other=0.0)
tmp194 = tl.where(tmp192, tmp193, tmp173)
tmp195 = tl.where(tmp188, tmp189, tmp194)
tmp196 = tl.where(tmp183, tmp184, tmp195)
tmp197 = tl.where(tmp178, tmp179, tmp196)
tmp198 = tl.full([1, 1], 36, tl.int64)
tmp199 = tmp0 >= tmp198
tmp200 = tl.full([1, 1], 37, tl.int64)
tmp201 = tmp0 < tmp200
tmp202 = tmp199 & tmp201
tmp203 = tl.load(in_ptr33 + (r2 + 128 * x1), tmp202 & xmask,
eviction_policy='evict_last', other=0.0)
tmp204 = tl.full([1, 1], 35, tl.int64)
tmp205 = tmp0 >= tmp204
tmp206 = tmp0 < tmp198
tmp207 = tmp205 & tmp206
tmp208 = tl.load(in_ptr34 + (r2 + 128 * x1), tmp207 & xmask,
eviction_policy='evict_last', other=0.0)
tmp209 = tl.full([1, 1], 34, tl.int64)
tmp210 = tmp0 >= tmp209
tmp211 = tmp0 < tmp204
tmp212 = tmp210 & tmp211
tmp213 = tl.load(in_ptr35 + (r2 + 128 * x1), tmp212 & xmask,
eviction_policy='evict_last', other=0.0)
tmp214 = tmp0 >= tmp176
tmp215 = tmp0 < tmp209
tmp216 = tmp214 & tmp215
tmp217 = tl.load(in_ptr36 + (r2 + 128 * x1), tmp216 & xmask,
eviction_policy='evict_last', other=0.0)
tmp218 = tl.where(tmp216, tmp217, tmp197)
tmp219 = tl.where(tmp212, tmp213, tmp218)
tmp220 = tl.where(tmp207, tmp208, tmp219)
tmp221 = tl.where(tmp202, tmp203, tmp220)
tmp222 = tl.full([1, 1], 40, tl.int64)
tmp223 = tmp0 >= tmp222
tmp224 = tl.full([1, 1], 41, tl.int64)
tmp225 = tmp0 < tmp224
tmp226 = tmp223 & tmp225
tmp227 = tl.load(in_ptr37 + (r2 + 128 * x1), tmp226 & xmask,
eviction_policy='evict_last', other=0.0)
tmp228 = tl.full([1, 1], 39, tl.int64)
tmp229 = tmp0 >= tmp228
tmp230 = tmp0 < tmp222
tmp231 = tmp229 & tmp230
tmp232 = tl.load(in_ptr38 + (r2 + 128 * x1), tmp231 & xmask,
eviction_policy='evict_last', other=0.0)
tmp233 = tl.full([1, 1], 38, tl.int64)
tmp234 = tmp0 >= tmp233
tmp235 = tmp0 < tmp228
tmp236 = tmp234 & tmp235
tmp237 = tl.load(in_ptr39 + (r2 + 128 * x1), tmp236 & xmask,
eviction_policy='evict_last', other=0.0)
tmp238 = tmp0 >= tmp200
tmp239 = tmp0 < tmp233
tmp240 = tmp238 & tmp239
tmp241 = tl.load(in_ptr40 + (r2 + 128 * x1), tmp240 & xmask,
eviction_policy='evict_last', other=0.0)
tmp242 = tl.where(tmp240, tmp241, tmp221)
tmp243 = tl.where(tmp236, tmp237, tmp242)
tmp244 = tl.where(tmp231, tmp232, tmp243)
tmp245 = tl.where(tmp226, tmp227, tmp244)
tmp246 = tl.full([1, 1], 44, tl.int64)
tmp247 = tmp0 >= tmp246
tmp248 = tl.full([1, 1], 45, tl.int64)
tmp249 = tmp0 < tmp248
tmp250 = tmp247 & tmp249
tmp251 = tl.load(in_ptr41 + (r2 + 128 * x1), tmp250 & xmask,
eviction_policy='evict_last', other=0.0)
tmp252 = tl.full([1, 1], 43, tl.int64)
tmp253 = tmp0 >= tmp252
tmp254 = tmp0 < tmp246
tmp255 = tmp253 & tmp254
tmp256 = tl.load(in_ptr42 + (r2 + 128 * x1), tmp255 & xmask,
eviction_policy='evict_last', other=0.0)
tmp257 = tl.full([1, 1], 42, tl.int64)
tmp258 = tmp0 >= tmp257
tmp259 = tmp0 < tmp252
tmp260 = tmp258 & tmp259
tmp261 = tl.load(in_ptr43 + (r2 + 128 * x1), tmp260 & xmask,
eviction_policy='evict_last', other=0.0)
tmp262 = tmp0 >= tmp224
tmp263 = tmp0 < tmp257
tmp264 = tmp262 & tmp263
tmp265 = tl.load(in_ptr44 + (r2 + 128 * x1), tmp264 & xmask,
eviction_policy='evict_last', other=0.0)
tmp266 = tl.where(tmp264, tmp265, tmp245)
tmp267 = tl.where(tmp260, tmp261, tmp266)
tmp268 = tl.where(tmp255, tmp256, tmp267)
tmp269 = tl.where(tmp250, tmp251, tmp268)
tmp270 = tl.full([1, 1], 48, tl.int64)
tmp271 = tmp0 >= tmp270
tmp272 = tl.full([1, 1], 49, tl.int64)
tmp273 = tmp0 < tmp272
tmp274 = tmp271 & tmp273
tmp275 = tl.load(in_ptr45 + (r2 + 128 * x1), tmp274 & xmask,
eviction_policy='evict_last', other=0.0)
tmp276 = tl.full([1, 1], 47, tl.int64)
tmp277 = tmp0 >= tmp276
tmp278 = tmp0 < tmp270
tmp279 = tmp277 & tmp278
tmp280 = tl.load(in_ptr46 + (r2 + 128 * x1), tmp279 & xmask,
eviction_policy='evict_last', other=0.0)
tmp281 = tl.full([1, 1], 46, tl.int64)
tmp282 = tmp0 >= tmp281
tmp283 = tmp0 < tmp276
tmp284 = tmp282 & tmp283
tmp285 = tl.load(in_ptr47 + (r2 + 128 * x1), tmp284 & xmask,
eviction_policy='evict_last', other=0.0)
tmp286 = tmp0 >= tmp248
tmp287 = tmp0 < tmp281
tmp288 = tmp286 & tmp287
tmp289 = tl.load(in_ptr48 + (r2 + 128 * x1), tmp288 & xmask,
eviction_policy='evict_last', other=0.0)
tmp290 = tl.where(tmp288, tmp289, tmp269)
tmp291 = tl.where(tmp284, tmp285, tmp290)
tmp292 = tl.where(tmp279, tmp280, tmp291)
tmp293 = tl.where(tmp274, tmp275, tmp292)
tmp294 = tl.full([1, 1], 52, tl.int64)
tmp295 = tmp0 >= tmp294
tmp296 = tl.full([1, 1], 53, tl.int64)
tmp297 = tmp0 < tmp296
tmp298 = tmp295 & tmp297
tmp299 = tl.load(in_ptr49 + (r2 + 128 * x1), tmp298 & xmask,
eviction_policy='evict_last', other=0.0)
tmp300 = tl.full([1, 1], 51, tl.int64)
tmp301 = tmp0 >= tmp300
tmp302 = tmp0 < tmp294
tmp303 = tmp301 & tmp302
tmp304 = tl.load(in_ptr50 + (r2 + 128 * x1), tmp303 & xmask,
eviction_policy='evict_last', other=0.0)
tmp305 = tl.full([1, 1], 50, tl.int64)
tmp306 = tmp0 >= tmp305
tmp307 = tmp0 < tmp300
tmp308 = tmp306 & tmp307
tmp309 = tl.load(in_ptr51 + (r2 + 128 * x1), tmp308 & xmask,
eviction_policy='evict_last', other=0.0)
tmp310 = tmp0 >= tmp272
tmp311 = tmp0 < tmp305
tmp312 = tmp310 & tmp311
tmp313 = tl.load(in_ptr52 + (r2 + 128 * x1), tmp312 & xmask,
eviction_policy='evict_last', other=0.0)
tmp314 = tl.where(tmp312, tmp313, tmp293)
tmp315 = tl.where(tmp308, tmp309, tmp314)
tmp316 = tl.where(tmp303, tmp304, tmp315)
tmp317 = tl.where(tmp298, tmp299, tmp316)
tmp318 = tl.full([1, 1], 56, tl.int64)
tmp319 = tmp0 >= tmp318
tmp320 = tl.full([1, 1], 57, tl.int64)
tmp321 = tmp0 < tmp320
tmp322 = tmp319 & tmp321
tmp323 = tl.load(in_ptr53 + (r2 + 128 * x1), tmp322 & xmask,
eviction_policy='evict_last', other=0.0)
tmp324 = tl.full([1, 1], 55, tl.int64)
tmp325 = tmp0 >= tmp324
tmp326 = tmp0 < tmp318
tmp327 = tmp325 & tmp326
tmp328 = tl.load(in_ptr54 + (r2 + 128 * x1), tmp327 & xmask,
eviction_policy='evict_last', other=0.0)
tmp329 = tl.full([1, 1], 54, tl.int64)
tmp330 = tmp0 >= tmp329
tmp331 = tmp0 < tmp324
tmp332 = tmp330 & tmp331
tmp333 = tl.load(in_ptr55 + (r2 + 128 * x1), tmp332 & xmask,
eviction_policy='evict_last', other=0.0)
tmp334 = tmp0 >= tmp296
tmp335 = tmp0 < tmp329
tmp336 = tmp334 & tmp335
tmp337 = tl.load(in_ptr56 + (r2 + 128 * x1), tmp336 & xmask,
eviction_policy='evict_last', other=0.0)
tmp338 = tl.where(tmp336, tmp337, tmp317)
tmp339 = tl.where(tmp332, tmp333, tmp338)
tmp340 = tl.where(tmp327, tmp328, tmp339)
tmp341 = tl.where(tmp322, tmp323, tmp340)
tmp342 = tl.full([1, 1], 60, tl.int64)
tmp343 = tmp0 >= tmp342
tmp344 = tl.full([1, 1], 61, tl.int64)
tmp345 = tmp0 < tmp344
tmp346 = tmp343 & tmp345
tmp347 = tl.load(in_ptr57 + (r2 + 128 * x1), tmp346 & xmask,
eviction_policy='evict_last', other=0.0)
tmp348 = tl.full([1, 1], 59, tl.int64)
tmp349 = tmp0 >= tmp348
tmp350 = tmp0 < tmp342
tmp351 = tmp349 & tmp350
tmp352 = tl.load(in_ptr58 + (r2 + 128 * x1), tmp351 & xmask,
eviction_policy='evict_last', other=0.0)
tmp353 = tl.full([1, 1], 58, tl.int64)
tmp354 = tmp0 >= tmp353
tmp355 = tmp0 < tmp348
tmp356 = tmp354 & tmp355
tmp357 = tl.load(in_ptr59 + (r2 + 128 * x1), tmp356 & xmask,
eviction_policy='evict_last', other=0.0)
tmp358 = tmp0 >= tmp320
tmp359 = tmp0 < tmp353
tmp360 = tmp358 & tmp359
tmp361 = tl.load(in_ptr60 + (r2 + 128 * x1), tmp360 & xmask,
eviction_policy='evict_last', other=0.0)
tmp362 = tl.where(tmp360, tmp361, tmp341)
tmp363 = tl.where(tmp356, tmp357, tmp362)
tmp364 = tl.where(tmp351, tmp352, tmp363)
tmp365 = tl.where(tmp346, tmp347, tmp364)
tmp366 = tl.full([1, 1], 63, tl.int64)
tmp367 = tmp0 >= tmp366
tmp368 = tl.load(in_ptr61 + (r2 + 128 * x1), tmp367 & xmask,
eviction_policy='evict_last', other=0.0)
tmp369 = tl.full([1, 1], 62, tl.int64)
tmp370 = tmp0 >= tmp369
tmp371 = tmp0 < tmp366
tmp372 = tmp370 & tmp371
tmp373 = tl.load(in_ptr62 + (r2 + 128 * x1), tmp372 & xmask,
eviction_policy='evict_last', other=0.0)
tmp374 = tmp0 >= tmp344
tmp375 = tmp0 < tmp369
tmp376 = tmp374 & tmp375
tmp377 = tl.load(in_ptr63 + (r2 + 128 * x1), tmp376 & xmask,
eviction_policy='evict_last', other=0.0)
tmp378 = tl.where(tmp376, tmp377, tmp365)
tmp379 = tl.where(tmp372, tmp373, tmp378)
tmp380 = tl.where(tmp367, tmp368, tmp379)
tmp381 = tmp380 * tmp380
tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
tmp384 = tl.where(xmask, tmp382, 0)
tmp385 = tl.sum(tmp384, 1)[:, None]
tmp386 = libdevice.sqrt(tmp385)
tl.store(in_out_ptr0 + (r2 + 128 * x3), tmp380, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp386, xmask)
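# The kernel below fuses NetVLAD's two normalization stages: the first pass
# accumulates sum((vlad / max(intra_norm, 1e-12))**2) per sample and takes a
# sqrt to obtain the global L2 norm; the second pass re-reads the flattened
# VLAD vector and divides by both the clamped intra-cluster and global norms.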
@triton.jit
def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = 1e-12
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 / tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = _tmp7 + tmp6
_tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
tmp7 = tl.sum(_tmp7, 1)[:, None]
tmp9 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = 1e-12
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp10 / tmp13
tmp15 = triton_helpers.maximum(tmp9, tmp12)
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + (r1 + 8192 * x0), tmp16, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_3, (64, 128), (128, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
get_raw_stream(0)
triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0,
16384, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
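        # One (4, 1, 128, 4096) scratch buffer per cluster: triton_poi_fused_div_sub_1
        # appears to write the residual (x / ||x|| - centroid_k) for each centroid k
        # into its own buffer, which the later mul/sum kernels reduce per cluster.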
buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf6 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096,
1), torch.float32)
buf8 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096,
1), torch.float32)
buf10 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf12 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf17 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf19 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf21 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf24 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf28 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf30 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf33 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf35 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf37 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf39 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf42 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf44 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf46 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf48 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf51 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf53 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf55 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf57 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf60 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf62 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf64 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf66 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf69 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf71 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf73 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf75 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf78 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf80 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf82 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf84 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf87 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf89 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf91 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf93 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf96 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf98 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf100 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf102 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf105 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf107 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf109 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf111 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf114 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf116 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf118 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf120 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf123 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf125 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf127 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf129 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf132 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf134 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf136 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf138 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf141 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf143 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf145 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
triton_poi_fused_div_sub_1[grid(2097152)](primals_1, buf0,
primals_3, buf1, buf6, buf8, buf10, buf12, buf15, buf17, buf19,
buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39,
buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60,
buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80,
buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100,
buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118,
buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136,
buf138, buf141, buf143, buf145, 2097152, XBLOCK=512, num_warps=
8, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32)
triton_per_fused__softmax_2[grid(16384)](buf2, buf3, buf4, 16384,
64, XBLOCK=8, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf13 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf18 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf25 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf27 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf29 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf31 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf34 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf36 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf38 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf40 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf43 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf45 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf47 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf49 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf52 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf54 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf56 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf58 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf61 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf63 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf65 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf67 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
triton_red_fused_mul_sub_sum_3[grid(512)](buf1, primals_3, buf2,
buf3, buf4, buf6, buf8, buf10, buf12, buf15, buf17, buf19,
buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39,
buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60,
buf62, buf64, buf66, buf5, buf7, buf9, buf11, buf13, buf16,
buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36,
buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56,
buf58, buf61, buf63, buf65, buf67, 512, 4096, XBLOCK=1, RBLOCK=
1024, num_warps=16, num_stages=1)
buf70 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf72 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf74 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf76 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf79 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf81 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf83 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf85 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf88 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf90 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf92 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf94 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf97 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf99 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf101 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf103 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf106 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf108 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf110 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf112 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf115 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf117 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf119 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf121 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf124 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf126 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf128 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf130 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
triton_red_fused_mul_sum_4[grid(512)](buf69, buf2, buf3, buf4,
buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89,
buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107,
buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125,
buf127, buf129, buf70, buf72, buf74, buf76, buf79, buf81, buf83,
buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103,
buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121,
buf124, buf126, buf128, buf130, 512, 4096, XBLOCK=1, RBLOCK=
1024, num_warps=16, num_stages=1)
buf133 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf135 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf137 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf139 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf142 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf144 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf146 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
triton_red_fused_mul_sum_5[grid(512)](buf132, buf2, buf3, buf4,
buf134, buf136, buf138, buf141, buf143, buf145, buf133, buf135,
buf137, buf139, buf142, buf144, buf146, 512, 4096, XBLOCK=1,
RBLOCK=1024, num_warps=16, num_stages=1)
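        # buf14..buf147 alias a single (4, 64, 128) accumulator: each rebind plus
        # `del` lets the fused copy/zeros kernel update the running VLAD matrix
        # in place instead of allocating a fresh tensor per cluster group.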
buf14 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32)
buf23 = buf14
del buf14
buf32 = buf23
del buf23
buf41 = buf32
del buf32
buf50 = buf41
del buf41
buf59 = buf50
del buf50
buf68 = buf59
del buf59
buf77 = buf68
del buf68
buf86 = buf77
del buf77
buf95 = buf86
del buf86
buf104 = buf95
del buf95
buf113 = buf104
del buf104
buf122 = buf113
del buf113
buf131 = buf122
del buf122
buf140 = buf131
del buf131
buf147 = buf140
del buf140
buf148 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32)
buf149 = reinterpret_tensor(buf148, (4, 64, 1), (64, 1, 1), 0)
del buf148
triton_per_fused_copy_linalg_vector_norm_zeros_6[grid(256)](buf147,
buf149, buf13, buf11, buf9, buf7, buf5, buf22, buf20, buf18,
buf16, buf31, buf29, buf27, buf25, buf40, buf38, buf36, buf34,
buf49, buf47, buf45, buf43, buf58, buf56, buf54, buf52, buf67,
buf65, buf63, buf61, buf76, buf74, buf72, buf70, buf85, buf83,
buf81, buf79, buf94, buf92, buf90, buf88, buf103, buf101, buf99,
buf97, buf112, buf110, buf108, buf106, buf121, buf119, buf117,
buf115, buf130, buf128, buf126, buf124, buf139, buf137, buf135,
buf133, buf146, buf144, buf142, 256, 128, XBLOCK=8, num_warps=8,
num_stages=1)
del buf101
del buf103
del buf106
del buf108
del buf11
del buf110
del buf112
del buf115
del buf117
del buf119
del buf121
del buf124
del buf126
del buf128
del buf13
del buf130
del buf133
del buf135
del buf137
del buf139
del buf142
del buf144
del buf146
del buf16
del buf18
del buf20
del buf22
del buf25
del buf27
del buf29
del buf31
del buf34
del buf36
del buf38
del buf40
del buf43
del buf45
del buf47
del buf49
del buf5
del buf52
del buf54
del buf56
del buf58
del buf61
del buf63
del buf65
del buf67
del buf7
del buf70
del buf72
del buf74
del buf76
del buf79
del buf81
del buf83
del buf85
del buf88
del buf9
del buf90
del buf92
del buf94
del buf97
del buf99
buf150 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf151 = reinterpret_tensor(buf150, (4, 1), (1, 1), 0)
del buf150
buf152 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32)
triton_red_fused_div_linalg_vector_norm_7[grid(4)](buf151, buf147,
buf149, buf152, 4, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
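        # buf152 is the final (4, 8192) L2-normalized VLAD descriptor; the other
        # returned tensors look like intermediates kept for the autograd backward.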
return (buf152, primals_2, buf1, buf2, buf3, buf4, reinterpret_tensor(
primals_3, (1, 128), (128, 1), 0), buf6, buf8, buf10, buf12, buf15,
buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35,
buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55,
buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75,
buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96,
buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114,
buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132,
buf134, buf136, buf138, buf141, buf143, buf145, buf147, buf149, buf151)
import numpy as np
# NOTE: init_params additionally relies on faiss (when use_faiss=True) or on
# sklearn's NearestNeighbors (when use_faiss=False); both are assumed to be
# available at call time.
class NetVLADNew(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, num_clusters=64, dim=128, normalize_input=True,
vladv2=False, use_faiss=True):
"""
Args:
num_clusters : int
The number of clusters
dim : int
Dimension of descriptors
normalize_input : bool
If true, descriptor-wise L2 normalization is applied to input.
vladv2 : bool
                If true, use vladv2; otherwise use vladv1.
            use_faiss : bool
                If true, use faiss for the nearest-neighbor search in
                init_params; otherwise fall back to sklearn's NearestNeighbors.
"""
super().__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = 0
self.vladv2 = vladv2
self.normalize_input = normalize_input
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=
vladv2)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
self.use_faiss = use_faiss
def init_params(self, clsts, traindescs):
if not self.vladv2:
clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
dots = np.dot(clstsAssign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :]
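            # alpha is set so that, for the mean gap between the best and
            # second-best cluster similarities, the soft-assignment weight
            # ratio is 100:1, i.e. exp(-alpha * gap) = 0.01.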
self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha *
clstsAssign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
else:
if not self.use_faiss:
knn = NearestNeighbors(n_jobs=-1)
knn.fit(traindescs)
del traindescs
ds_sq = np.square(knn.kneighbors(clsts, 2)[1])
del knn
else:
index = faiss.IndexFlatL2(traindescs.shape[1])
index.add(traindescs)
del traindescs
ds_sq = np.square(index.search(clsts, 2)[1])
del index
self.alpha = (-np.log(0.01) / np.mean(ds_sq[:, 1] - ds_sq[:, 0])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
del clsts, ds_sq
self.conv.weight = nn.Parameter((2.0 * self.alpha * self.
centroids).unsqueeze(-1).unsqueeze(-1))
self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm
(dim=1))
def forward(self, input_0):
primals_3 = self.centroids
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
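if __name__ == "__main__":
    # Minimal smoke test -- a sketch, assuming a CUDA device and the
    # (4, 128, 64, 64) input shape that call() asserts.
    model = NetVLADNew(num_clusters=64, dim=128).cuda()
    x = torch.rand(4, 128, 64, 64, device="cuda")
    vlad = model(x)
    assert vlad.shape == (4, 64 * 128)  # flattened, L2-normalized descriptor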
| StephenHausler/Patch-NetVLAD | NetVLAD | false | 9,827 | [
"MIT"
] | 0 | 5d8b68fb7aa686e9c08a48ce504ecc552fff7b0b | https://github.com/StephenHausler/Patch-NetVLAD/tree/5d8b68fb7aa686e9c08a48ce504ecc552fff7b0b |
LayerScale | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/n3/cn35ktybwif7rqmutznvx4tpvoabmd7tz7tkzr4iou2t3kvpzivg.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %primals_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
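    # x1 selects the second-to-last index: the (4,) scale is unsqueezed to
    # broadcast over the final axis, so element i is multiplied by
    # in_ptr0[(i // 4) % 4].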
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
return buf0, primals_2
class LayerScaleNew(nn.Module):
"""Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
    This rescales residual outputs diagonally; the scales start close to 0
    and are then learned.
"""
def __init__(self, channels: 'int', init: 'float'=0):
super().__init__()
self.scale = nn.Parameter(torch.zeros(channels, requires_grad=True))
self.scale.data[:] = init
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
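if __name__ == "__main__":
    # Minimal smoke test -- a sketch, assuming a CUDA device and the
    # (4, 4, 4, 4) input shape that call() asserts.
    layer = LayerScaleNew(channels=4, init=0.1).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    out = layer(x)
    assert out.shape == x.shape  # elementwise rescale, shape preserved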
| xvdp/demucs | LayerScale | false | 11,084 | [
"MIT"
] | 0 | 0a5e3b72c6388801cf0086c2b84d09f6d73c389c | https://github.com/xvdp/demucs/tree/0a5e3b72c6388801cf0086c2b84d09f6d73c389c |
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/4d/c4d7os35bf4bckecmik4nlyqqsirmteh4sh3yxnab5lmuntnmwk2.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (36*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
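# triton_poi_fused_0 through triton_poi_fused_3 appear to relayout tensors into
# the channels-last-style strides Inductor chose for the fused convolutions:
# this one permutes a (32, 4, 3, 3) conv weight so the input-channel dim is
# innermost, and the next handles the (4, 4, 4, 4) activation input.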
# kernel path: runs/run_shard_8/inductor_cache/j5/cj5nf2owtsdm2zwcezqxpyn63iwddjyadpotkhm2ua52inoqxdcl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/wv/cwvtp6qflpb42kxrujmda5zselv7wvkz3fgp2tryo2ftsisaildr.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/tl/ctlxctn7eg6nwvpdhdhyqadp63cm2ogdwxsotfynexn2zw62nfbb.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ku/ckuw5gg26ddjp4n4da74yttcx6jxcy2y4vb2npxdoq42pzni2oot.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/m3/cm3haovccm7lav2s6wgp3wthu7in42r335z2o7yva4d7olh5begj.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ir/cirx6nbkabstacj3yb3umtzb7ustxzn5ha5etdpsewqc2v53x42u.py
# Topologically Sorted Source Nodes: [conv2d_3, x_act], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_act => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
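# The le_2 mask records where the fused ReLU clamped to zero, so the backward
# pass can zero the corresponding gradients (aten.threshold_backward).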
triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + (4*x2) + (64*y1)), tmp6, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mx/cmxxnyyr5kmtzdpzon3a5fqr6k4jayrantx3iscrc5pevtb6lc52.py
# Topologically Sorted Source Nodes: [x_act_2], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# x_act_2 => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_8 = async_compile.triton('triton_per_fused__log_softmax_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_8(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (16*x0)), tmp12, xmask)
''', device_str='cuda')
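# Reference-only sketch (added for clarity; not Inductor output): the
# persistent-reduction kernel above is a numerically stable log-softmax over
# dim=1, matching the amax/exp/sum/log decomposition in the graph fragment.
def _log_softmax_reference(x):
    # x: (4, 16); subtracting the row max before exp avoids overflow
    shifted = x - x.max(dim=1, keepdim=True).values
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()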
# kernel path: runs/run_shard_8/inductor_cache/er/cerrhd6dfklfkghffr4w4v6k4tkknpp6pjf2fpylkzd3qma7oygl.py
# Topologically Sorted Source Nodes: [conv2d_4, x_val], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_val => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = (yindex // 2)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (2*x2) + (32*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + (2*x2) + (32*y1)), tmp6, xmask & ymask)
''', device_str='cuda')
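# Reference-only sketch (an assumption based on the graph fragment, not
# generated code): besides the biased ReLU output, the kernel above also
# stores the boolean mask relu(x) <= 0 that aten.threshold_backward later
# uses to zero the corresponding gradients in the backward pass.
def _relu_with_backward_mask(conv_out, bias):
    out = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return out, out <= 0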
# kernel path: runs/run_shard_8/inductor_cache/dy/cdyy4l65r6roouxfxf2rt7jc3yi26kd72lw6ykhtgaiqpacjtrts.py
# Topologically Sorted Source Nodes: [x_val_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_val_2 => relu_5
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_15), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_10 = async_compile.triton('triton_poi_fused_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
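# Reference-only sketch (not Inductor output): the same bias-add + ReLU
# epilogue as above, here fused onto the matmul feeding the value head's
# first fully connected layer (x_val_2 = relu(mm_out + b), per the graph
# fragment).
def _fc_relu_reference(mm_out, bias):
    # mm_out: (4, 64); bias: (64,) broadcast across the batch dimension
    return torch.relu(mm_out + bias)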
# kernel path: runs/run_shard_8/inductor_cache/jp/cjpw3qv3xbeeitqrvxn6apmx6vcxqlrxpbbtjnzsyqgtt4tatr6q.py
# Topologically Sorted Source Nodes: [x_val_4], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_val_4 => tanh
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_17), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_tanh_11 = async_compile.triton('triton_poi_fused_tanh_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
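# Reference-only sketch (added for clarity): the kernel above adds the value
# head's single-element bias and applies tanh in place, i.e.
# x_val_4 = tanh(mm_out + b) with b broadcast over the batch of 4.
def _tanh_head_reference(mm_out, bias):
    return torch.tanh(mm_out + bias)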
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (4, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (16, 64), (64, 1))
assert_size_stride(primals_11, (16, ), (1, ))
assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_13, (2, ), (1, ))
assert_size_stride(primals_14, (64, 32), (32, 1))
assert_size_stride(primals_15, (64, ), (1, ))
assert_size_stride(primals_16, (1, 64), (64, 1))
assert_size_stride(primals_17, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 3, 3), (36, 1, 12, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 128, 9, grid=grid(128, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 2048, 9, grid=grid(2048, 9), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 4, 4), (512, 1, 128, 32))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf5, primals_2, 2048, grid=grid(2048), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 4, 4), (1024, 1, 256, 64))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf7, primals_5, 4096, grid=grid(4096), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 4, 4), (2048, 1, 512, 128))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf9, primals_7, 8192, grid=grid(8192), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4))
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x_act], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf10, primals_9, buf11, buf23, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 16), (1, 64), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf15 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_act_2], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_8.run(buf12, buf15, 4, 16, grid=grid(4), stream=stream0)
del buf12
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 2, 4, 4), (32, 1, 8, 2))
buf17 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, x_val], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_9.run(buf16, primals_13, buf17, buf22, 8, 16, grid=grid(8, 16), stream=stream0)
del buf16
del primals_13
buf18 = reinterpret_tensor(buf10, (4, 64), (64, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (4, 32), (32, 1), 0), reinterpret_tensor(primals_14, (32, 64), (1, 32), 0), out=buf18)
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x_val_2], Original ATen: [aten.relu]
triton_poi_fused_relu_10.run(buf19, primals_15, 256, grid=grid(256), stream=stream0)
del primals_15
buf20 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf19, reinterpret_tensor(primals_16, (64, 1), (1, 64), 0), out=buf20)
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_val_4], Original ATen: [aten.tanh]
triton_poi_fused_tanh_11.run(buf21, primals_17, 4, grid=grid(4), stream=stream0)
del primals_17
return (buf15, buf21, buf0, buf1, buf2, buf3, primals_8, primals_12, buf5, buf7, buf9, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), buf15, reinterpret_tensor(buf17, (4, 32), (32, 1), 0), buf19, buf21, primals_16, primals_14, buf22, primals_10, buf23, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((2, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_per_fused__log_softmax_8(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 16 * x0), tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 8
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 32 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 2 * x2 + 32 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_tanh_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = libdevice.tanh(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (4, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (16, 64), (64, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_13, (2,), (1,))
assert_size_stride(primals_14, (64, 32), (32, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (1, 64), (64, 1))
assert_size_stride(primals_17, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 3, 3), (36, 1, 12, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 9)](primals_1, buf0, 128, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 4, 4), (512, 1, 128, 32))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_4[grid(2048)](buf5, primals_2,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 4, 4), (1024, 1, 256, 64))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_5[grid(4096)](buf7, primals_5,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 4, 4), (2048, 1, 512, 128))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_6[grid(8192)](buf9, primals_7,
8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4))
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(16, 16)](
buf10, primals_9, buf11, buf23, 16, 16, XBLOCK=16, YBLOCK=16,
num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (4, 64),
(64, 1), 0), reinterpret_tensor(primals_10, (64, 16), (1, 64),
0), alpha=1, beta=1, out=buf12)
del primals_11
buf15 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_per_fused__log_softmax_8[grid(4)](buf12, buf15, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del buf12
buf16 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 2, 4, 4), (32, 1, 8, 2))
buf17 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(8, 16)](
buf16, primals_13, buf17, buf22, 8, 16, XBLOCK=16, YBLOCK=8,
num_warps=4, num_stages=1)
del buf16
del primals_13
buf18 = reinterpret_tensor(buf10, (4, 64), (64, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf17, (4, 32), (32, 1), 0),
reinterpret_tensor(primals_14, (32, 64), (1, 32), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_relu_10[grid(256)](buf19, primals_15, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_15
buf20 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf19, reinterpret_tensor(primals_16, (64, 1), (1,
64), 0), out=buf20)
buf21 = buf20
del buf20
triton_poi_fused_tanh_11[grid(4)](buf21, primals_17, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_17
return (buf15, buf21, buf0, buf1, buf2, buf3, primals_8, primals_12,
buf5, buf7, buf9, reinterpret_tensor(buf11, (4, 64), (64, 1), 0),
buf15, reinterpret_tensor(buf17, (4, 32), (32, 1), 0), buf19, buf21,
primals_16, primals_14, buf22, primals_10, buf23)
class NetNew(nn.Module):
"""policy-value network module"""
def __init__(self, board_width, board_height):
super(NetNew, self).__init__()
self.board_width = board_width
self.board_height = board_height
self.conv1 = nn.Conv2d(4, 32, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.act_conv1 = nn.Conv2d(128, 4, kernel_size=1)
self.act_fc1 = nn.Linear(4 * board_width * board_height,
board_width * board_height)
self.val_conv1 = nn.Conv2d(128, 2, kernel_size=1)
self.val_fc1 = nn.Linear(2 * board_width * board_height, 64)
self.val_fc2 = nn.Linear(64, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.act_conv1.weight
primals_9 = self.act_conv1.bias
primals_10 = self.act_fc1.weight
primals_11 = self.act_fc1.bias
primals_12 = self.val_conv1.weight
primals_13 = self.val_conv1.bias
primals_14 = self.val_fc1.weight
primals_15 = self.val_fc1.bias
primals_16 = self.val_fc2.weight
primals_17 = self.val_fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0], output[1]
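# Hypothetical usage sketch (not in the original repo): the compiled module
# keeps the eager API, taking the (4, 4, board_width, board_height) state
# tensor asserted in call() and returning (log_policy, value).
def _example_usage():
    net = NetNew(board_width=4, board_height=4).cuda()
    log_policy, value = net(torch.rand(4, 4, 4, 4, device='cuda'))
    return log_policy.shape, value.shape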
| moddent/Gomoku_Deep | Net | false | 10,577 | [
"MIT"
] | 0 | 5d9bca97e6b30db4f99a4686152bcef7a6160ac6 | https://github.com/moddent/Gomoku_Deep/tree/5d9bca97e6b30db4f99a4686152bcef7a6160ac6 |
compute_g_spa | import torch
import torch.nn as nn
class cnn1x1(nn.Module):
def __init__(self, dim1=3, dim2=3, bias=True):
super(cnn1x1, self).__init__()
self.cnn = nn.Conv2d(dim1, dim2, kernel_size=1, bias=bias)
def forward(self, x):
x = self.cnn(x)
return x
class compute_g_spa(nn.Module):
def __init__(self, dim1=64 * 3, dim2=64 * 3, bias=False):
super(compute_g_spa, self).__init__()
self.dim1 = dim1
self.dim2 = dim2
self.g1 = cnn1x1(self.dim1, self.dim2, bias=bias)
self.g2 = cnn1x1(self.dim1, self.dim2, bias=bias)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x1):
g1 = self.g1(x1).permute(0, 3, 2, 1).contiguous()
g2 = self.g2(x1).permute(0, 3, 1, 2).contiguous()
g3 = g1.matmul(g2)
g = self.softmax(g3)
return g
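# Reference-only sketch (an illustration, not part of the repo): written
# functionally, the module computes a softmax-normalised affinity map
# softmax(g1 @ g2^T) for each slice along the last spatial axis, from two
# bias-free 1x1-conv embeddings of the same input.
def _g_spa_reference(x, w1, w2):
    # x: (N, C, H, W); w1, w2: (C, C, 1, 1) 1x1-conv weights
    g1 = nn.functional.conv2d(x, w1).permute(0, 3, 2, 1)
    g2 = nn.functional.conv2d(x, w2).permute(0, 3, 1, 2)
    return torch.softmax(g1.matmul(g2), dim=-1)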
def get_inputs():
return [torch.rand([4, 192, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 768
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 192 * x2 + 786432 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 192
x1 = xindex // 192 % 64
x2 = xindex // 12288 % 64
x3 = xindex // 786432
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 192 * x2 + 12288 * x1 + 786432 * x3), None)
tl.store(out_ptr0 + x4, tmp0, None)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 12288
y1 = yindex // 12288
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12288 * x2 + 786432 * y1), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 64 * y3), tmp0, xmask)
@triton.jit
def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tmp5 / tmp8
tl.store(out_ptr2 + (r1 + 64 * x0), tmp9, None)
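# Reference-only sketch (added for clarity): the persistent-reduction kernel
# above is a fused, numerically stable row softmax over the last (size-64)
# dimension of the bmm output, i.e. torch.softmax(g3, dim=-1).
def _softmax_reference(g3):
    e = (g3 - g3.max(dim=-1, keepdim=True).values).exp()
    return e / e.sum(dim=-1, keepdim=True)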
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (192, 192, 1, 1), (192, 1, 1, 1))
assert_size_stride(primals_2, (4, 192, 64, 64), (786432, 4096, 64, 1))
assert_size_stride(primals_3, (192, 192, 1, 1), (192, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 192, 64, 64), (786432, 1, 12288, 192),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(768, 4096)](primals_2, buf0, 768, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 192, 64, 64), (786432, 1, 12288, 192))
buf2 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 192, 64, 64), (786432, 1, 12288, 192))
buf3 = empty_strided_cuda((4, 64, 64, 192), (786432, 12288, 192, 1),
torch.float32)
triton_poi_fused_clone_1[grid(3145728)](buf1, buf3, 3145728, XBLOCK
=512, num_warps=8, num_stages=1)
buf4 = reinterpret_tensor(buf1, (4, 64, 192, 64), (786432, 12288,
64, 1), 0)
del buf1
triton_poi_fused_clone_2[grid(49152, 64)](buf2, buf4, 49152, 64,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf2
buf5 = empty_strided_cuda((256, 64, 64), (4096, 64, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (256, 64, 192), (12288,
192, 1), 0), reinterpret_tensor(buf4, (256, 192, 64), (12288,
64, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_per_fused__softmax_3[grid(16384)](buf5, buf8, 16384, 64,
XBLOCK=8, num_warps=4, num_stages=1)
del buf5
return buf8, primals_1, buf0, primals_3, buf8, reinterpret_tensor(buf3,
(256, 192, 64), (12288, 1, 192), 0), reinterpret_tensor(buf4, (256,
64, 192), (12288, 1, 64), 0)
class cnn1x1(nn.Module):
def __init__(self, dim1=3, dim2=3, bias=True):
super(cnn1x1, self).__init__()
self.cnn = nn.Conv2d(dim1, dim2, kernel_size=1, bias=bias)
def forward(self, x):
x = self.cnn(x)
return x
class compute_g_spaNew(nn.Module):
def __init__(self, dim1=64 * 3, dim2=64 * 3, bias=False):
super(compute_g_spaNew, self).__init__()
self.dim1 = dim1
self.dim2 = dim2
self.g1 = cnn1x1(self.dim1, self.dim2, bias=bias)
self.g2 = cnn1x1(self.dim1, self.dim2, bias=bias)
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0):
primals_1 = self.g1.cnn.weight
primals_3 = self.g2.cnn.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| fabro66/Online-Skeleton-based-Action-Recognition | compute_g_spa | false | 15,363 | [
"MIT"
] | 63 | de00cbf17ceea98a7d07f68bbbd966bfd02d3b40 | https://github.com/fabro66/Online-Skeleton-based-Action-Recognition/tree/de00cbf17ceea98a7d07f68bbbd966bfd02d3b40 |
NormAttnMap | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yg/cygpjynbou473tf4noi2cem3yrk3bayubj7lfcuqbdqshgaaj7wb.py
# Topologically Sorted Source Nodes: [max_1, setitem], Original ATen: [aten.max, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# max_1 => max_1
# setitem => full_default, index_put
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, 1, True), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%getitem, [%le], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_max_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
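    # Added note: x0/x1 address one (h, w) site per batch element; the four
    # strided loads below read its four channels, unrolling the dim=1 max.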
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 <= tmp7
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
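# Reference-only sketch (added for clarity): the kernel above fuses the
# channel max with the index_put that floors it at 1, which is equivalent to
# norm = attn.max(dim=1, keepdim=True).values.clamp(min=1.0).
def _clamped_max_reference(attn):
    return attn.max(dim=1, keepdim=True).values.clamp(min=1.0)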
# kernel path: runs/run_shard_0/inductor_cache/mq/cmqzmtg5i7c3jiw6tatnxnuezvocfhdxvxtgr246t3g35eioloo2.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.div]
# Source node to ATen node mapping:
# attn => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %index_put), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, setitem], Original ATen: [aten.max, aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.div]
triton_poi_fused_div_1.run(arg0_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_put_lift_fresh_max_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 <= tmp7
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_max_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_1[grid(256)](arg0_1, buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf1, buf0
class NormAttnMapNew(nn.Module):
def __init__(self, norm_type='cossim'):
super(NormAttnMapNew, self).__init__()
self.norm_type = norm_type
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
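# Reference-only sketch (an eager equivalent of the two fused kernels above,
# matching the original NormAttnMap.forward): scale each attention map by its
# per-position channel max, never dividing by anything smaller than 1.
def _norm_attn_map_reference(attn):
    norm = attn.max(dim=1, keepdim=True).values.clamp(min=1.0)
    return attn / norm, norm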
| sibeiyang/sgmn | NormAttnMap | false | 16,438 | [
"MIT"
] | 130 | 00731b4f2202246d40a36d2a6727c599e6e649aa | https://github.com/sibeiyang/sgmn/tree/00731b4f2202246d40a36d2a6727c599e6e649aa |
GAT | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/i4/ci4j7o62hjlvxysby5leuec4f5mnobz3p5wi5zmgnb6pfgczycms.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_2, %repeat_1], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 16
x2 = (xindex // 128)
x3 = xindex
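    # Added note: x2 indexes the batch, x1 the N*N node pairs, and x0 the 2F
    # concatenated feature slots; x0 < 4 selects the repeated Wh_i half and
    # x0 >= 4 the tiled Wh_j half.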
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*((((4*x1) + x0) // 16) % 4)) + (16*((((4*x1) + (64*x2) + x0) // 64) % 4)) + ((((4*x1) + x0) % 16) % 4)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + ((4*(x1 % 4)) + (16*x2) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
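# Reference-only sketch (an assumption inferred from the repeat/cat graph
# fragment; names are illustrative): GAT-style construction of the pairwise
# features [Wh_i || Wh_j] for every node pair, which the fused kernel above
# materialises directly from Wh instead of building the two repeats.
def _pairwise_cat_reference(Wh):
    # Wh: (B, N, F) -> (B, N * N, 2 * F)
    B, N, F = Wh.shape
    left = Wh.repeat_interleave(N, dim=1)  # row i repeated N times
    right = Wh.repeat(1, N, 1)             # all rows tiled N times
    return torch.cat([left, right], dim=2)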
# kernel path: runs/run_shard_9/inductor_cache/fy/cfyhpfvlh7v2kamyddf44ycfki2eygiwxnllf3xlbccy7vzxtcnc.py
# Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# e => gt
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze, 0), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/c3/cc3jesmqsfkxzdmzwd3u5t52xvkpzl4rtjwuve7z2oe4uqfzknpd.py
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
# Source node to ATen node mapping:
# gt => gt_1
# Graph fragment:
# %gt_1 : [num_users=5] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_4, 0), kwargs = {})
triton_poi_fused_gt_2 = async_compile.triton('triton_poi_fused_gt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/f5/cf5atbk7j66ttv4hfabzy4hlvzlgp4cmnvsrrdv7lu2mgbytrlxj.py
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention => where_1
# attention_1 => amax, exp, sub, sum_1
# attention_10 => amax_3, exp_3, sub_3, sum_4
# attention_3 => where_4
# attention_4 => amax_1, exp_1, sub_1, sum_2
# attention_6 => where_7
# attention_7 => amax_2, exp_2, sub_2, sum_3
# attention_9 => where_10
# e => mul, where
# e_1 => mul_5, where_3
# e_2 => mul_10, where_6
# e_3 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {})
# %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_4, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_7, [1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_10, [1], True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [1], True), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_3 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 36, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr2 + (16 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr2 + (32 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp24 = tl.load(in_ptr2 + (48 + x0), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp41 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (16 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp46 = tl.load(in_ptr4 + (16 + x0), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr3 + (32 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp52 = tl.load(in_ptr4 + (32 + x0), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr3 + (48 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp58 = tl.load(in_ptr4 + (48 + x0), xmask, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp75 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr5 + (16 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp80 = tl.load(in_ptr6 + (16 + x0), xmask, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr5 + (32 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp86 = tl.load(in_ptr6 + (32 + x0), xmask, eviction_policy='evict_last')
tmp91 = tl.load(in_ptr5 + (48 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp92 = tl.load(in_ptr6 + (48 + x0), xmask, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr7 + (x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp109 = tl.load(in_ptr8 + (x0), xmask, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr7 + (16 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp114 = tl.load(in_ptr8 + (16 + x0), xmask, eviction_policy='evict_last')
tmp119 = tl.load(in_ptr7 + (32 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp120 = tl.load(in_ptr8 + (32 + x0), xmask, eviction_policy='evict_last')
tmp125 = tl.load(in_ptr7 + (48 + x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp126 = tl.load(in_ptr8 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp42 = tmp41 * tmp3
tmp43 = tl.where(tmp40, tmp41, tmp42)
tmp44 = tl.where(tmp0, tmp43, tmp6)
tmp47 = tmp46 * tmp3
tmp48 = tl.where(tmp45, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp48, tmp6)
tmp50 = triton_helpers.maximum(tmp44, tmp49)
tmp53 = tmp52 * tmp3
tmp54 = tl.where(tmp51, tmp52, tmp53)
tmp55 = tl.where(tmp15, tmp54, tmp6)
tmp56 = triton_helpers.maximum(tmp50, tmp55)
tmp59 = tmp58 * tmp3
tmp60 = tl.where(tmp57, tmp58, tmp59)
tmp61 = tl.where(tmp22, tmp60, tmp6)
tmp62 = triton_helpers.maximum(tmp56, tmp61)
tmp63 = tmp44 - tmp62
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp49 - tmp62
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp55 - tmp62
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tmp71 = tmp61 - tmp62
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp70 + tmp72
tmp76 = tmp75 * tmp3
tmp77 = tl.where(tmp74, tmp75, tmp76)
tmp78 = tl.where(tmp0, tmp77, tmp6)
tmp81 = tmp80 * tmp3
tmp82 = tl.where(tmp79, tmp80, tmp81)
tmp83 = tl.where(tmp8, tmp82, tmp6)
tmp84 = triton_helpers.maximum(tmp78, tmp83)
tmp87 = tmp86 * tmp3
tmp88 = tl.where(tmp85, tmp86, tmp87)
tmp89 = tl.where(tmp15, tmp88, tmp6)
tmp90 = triton_helpers.maximum(tmp84, tmp89)
tmp93 = tmp92 * tmp3
tmp94 = tl.where(tmp91, tmp92, tmp93)
tmp95 = tl.where(tmp22, tmp94, tmp6)
tmp96 = triton_helpers.maximum(tmp90, tmp95)
tmp97 = tmp78 - tmp96
tmp98 = tl_math.exp(tmp97)
tmp99 = tmp83 - tmp96
tmp100 = tl_math.exp(tmp99)
tmp101 = tmp98 + tmp100
tmp102 = tmp89 - tmp96
tmp103 = tl_math.exp(tmp102)
tmp104 = tmp101 + tmp103
tmp105 = tmp95 - tmp96
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp104 + tmp106
tmp110 = tmp109 * tmp3
tmp111 = tl.where(tmp108, tmp109, tmp110)
tmp112 = tl.where(tmp0, tmp111, tmp6)
tmp115 = tmp114 * tmp3
tmp116 = tl.where(tmp113, tmp114, tmp115)
tmp117 = tl.where(tmp8, tmp116, tmp6)
tmp118 = triton_helpers.maximum(tmp112, tmp117)
tmp121 = tmp120 * tmp3
tmp122 = tl.where(tmp119, tmp120, tmp121)
tmp123 = tl.where(tmp15, tmp122, tmp6)
tmp124 = triton_helpers.maximum(tmp118, tmp123)
tmp127 = tmp126 * tmp3
tmp128 = tl.where(tmp125, tmp126, tmp127)
tmp129 = tl.where(tmp22, tmp128, tmp6)
tmp130 = triton_helpers.maximum(tmp124, tmp129)
tmp131 = tmp112 - tmp130
tmp132 = tl_math.exp(tmp131)
tmp133 = tmp117 - tmp130
tmp134 = tl_math.exp(tmp133)
tmp135 = tmp132 + tmp134
tmp136 = tmp123 - tmp130
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp139 = tmp129 - tmp130
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tl.store(out_ptr0 + (x2), tmp28, xmask)
tl.store(out_ptr1 + (x2), tmp39, xmask)
tl.store(out_ptr2 + (x2), tmp62, xmask)
tl.store(out_ptr3 + (x2), tmp73, xmask)
tl.store(out_ptr4 + (x2), tmp96, xmask)
tl.store(out_ptr5 + (x2), tmp107, xmask)
tl.store(out_ptr6 + (x2), tmp130, xmask)
tl.store(out_ptr7 + (x2), tmp141, xmask)
''', device_str='cuda')
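# Note: the fused kernel above computes, for each of the four attention heads,
# the row-wise maximum (amax) and the sum of exp(x - amax) in one pass. This is
# the standard numerically stable softmax decomposition; a hedged PyTorch
# sketch of one head, assuming `masked` holds the adjacency-masked scores:
#   amax = masked.max(dim=1, keepdim=True).values
#   denom = (masked - amax).exp().sum(dim=1, keepdim=True)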
# kernel path: runs/run_shard_9/inductor_cache/lu/cludl62f6qo4f34m7ejzpxdcunrj3shfeb7xo4jvuj3n5a4sh6a6.py
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention => where_1
# attention_1 => div, exp, sub
# attention_10 => div_3, exp_3, sub_3
# attention_3 => where_4
# attention_4 => div_1, exp_1, sub_1
# attention_6 => where_7
# attention_7 => div_2, exp_2, sub_2
# attention_9 => where_10
# e => mul, where
# e_1 => mul_5, where_3
# e_2 => mul_10, where_6
# e_3 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {})
# %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_2, %sum_3), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_4), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_4 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*i1', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*i1', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x4), xmask, eviction_policy='evict_last').to(tl.int1)
tmp14 = tl.load(in_ptr6 + (x4), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr7 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr8 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr9 + (x4), xmask, eviction_policy='evict_last').to(tl.int1)
tmp24 = tl.load(in_ptr10 + (x4), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr11 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr12 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr13 + (x4), xmask, eviction_policy='evict_last').to(tl.int1)
tmp34 = tl.load(in_ptr14 + (x4), xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr15 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr16 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = tmp14 * tmp3
tmp16 = tl.where(tmp13, tmp14, tmp15)
tmp17 = tl.where(tmp0, tmp16, tmp6)
tmp19 = tmp17 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp0, tmp26, tmp6)
tmp29 = tmp27 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp30 / tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp34, tmp35)
tmp37 = tl.where(tmp0, tmp36, tmp6)
tmp39 = tmp37 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp42 = tmp40 / tmp41
tl.store(out_ptr0 + (x3), tmp12, xmask)
tl.store(out_ptr1 + (x3), tmp22, xmask)
tl.store(out_ptr2 + (x3), tmp32, xmask)
tl.store(out_ptr3 + (x3), tmp42, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/mu/cmu4dmnjb3m2bpsy345zghpbe6uqogqd4h7akjugnavnr5t7dfhe.py
# Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# h_prime => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/p6/cp6dbvqyf5xgpxxdiwciohgb2ayhhg4kfwvauizjk22u4hoilsvn.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat_4
# Graph fragment:
# %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%where_2, %where_5, %where_8, %where_11], 1), kwargs = {})
triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp18 & xmask, other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp30 & xmask, other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tmp40 = tl.full([1], 16, tl.int64)
tmp41 = tmp0 < tmp40
tmp42 = tl.load(in_ptr3 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp39 & xmask, other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + (x3), tmp52, xmask)
''', device_str='cuda')
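# Note: despite the [aten.cat] annotation, this kernel also folds in the ELU
# applied to each head's output (the expm1 branch with scale 1.0 corresponds
# to alpha = 1) before concatenating the four heads along dim 1.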
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, buf1, 512, grid=grid(512), stream=stream0)
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
triton_poi_fused_gt_2.run(primals_4, buf4, 256, grid=grid(256), stream=stream0)
del primals_4
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf10)
del primals_5
buf11 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf10, buf11, 512, grid=grid(512), stream=stream0)
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (64, 8), (8, 1), 0), primals_6, out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_7, out=buf19)
del primals_7
buf20 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf19, buf20, 512, grid=grid(512), stream=stream0)
buf21 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf20, (64, 8), (8, 1), 0), primals_8, out=buf21)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf21, buf22, 64, grid=grid(64), stream=stream0)
buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_9, out=buf28)
del primals_9
buf29 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf28, buf29, 512, grid=grid(512), stream=stream0)
buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf29, (64, 8), (8, 1), 0), primals_10, out=buf30)
buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf30, buf31, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf24 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf32 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf33 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_3.run(buf4, buf3, buf2, buf13, buf12, buf22, buf21, buf31, buf30, buf5, buf6, buf14, buf15, buf23, buf24, buf32, buf33, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf34 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_3, attention_4, e_2, attention_6, attention_7, e_3, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_4.run(buf4, buf3, buf2, buf5, buf6, buf13, buf12, buf14, buf15, buf22, buf21, buf23, buf24, buf31, buf30, buf32, buf33, buf7, buf16, buf25, buf34, 256, grid=grid(256), stream=stream0)
del buf12
del buf14
del buf15
del buf2
del buf21
del buf23
del buf24
del buf30
del buf32
del buf33
del buf5
del buf6
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf0, buf8, 256, grid=grid(256), stream=stream0)
del buf0
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_1], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf10, buf17, 256, grid=grid(256), stream=stream0)
del buf10
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18)
buf26 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_2], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf19, buf26, 256, grid=grid(256), stream=stream0)
del buf19
buf27 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), out=buf27)
buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_3], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf28, buf35, 256, grid=grid(256), stream=stream0)
del buf28
buf36 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf34, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36)
buf37 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_6.run(buf9, buf18, buf27, buf36, buf37, 1024, grid=grid(1024), stream=stream0)
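        # buf37 holds the concatenated multi-head output; the remaining
        # returned tensors are activations and reinterpreted weights that
        # inductor stashes for the backward pass.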
return (buf37, buf3, buf4, buf7, buf9, buf13, buf16, buf18, buf22, buf25, buf27, buf31, buf34, buf36, reinterpret_tensor(buf35, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf29, (8, 64), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf26, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf20, (8, 64), (1, 8), 0), reinterpret_tensor(primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf11, (8, 64), (1, 8), 0), reinterpret_tensor(primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (8, 64), (1, 8), 0), reinterpret_tensor(primals_3, (1, 8), (1, 1), 0), )
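# Standalone benchmark harness: builds random CUDA inputs with the expected
# shapes and strides, then times `call` via inductor's print_performance.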
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + 16 * ((4 * x1 +
64 * x2 + x0) // 64 % 4) + (4 * x1 + x0) % 16 % 4), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x2 + (-4 + x0)), tmp6 &
xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
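# The kernel above appears to materialize the pairwise feature tensor used by
# GAT-style attention: for each node pair (i, j) it writes [Wh_i || Wh_j],
# hence the trailing dimension of 8 = 2 * 4. A rough PyTorch sketch of the
# same layout (an assumption based on the index arithmetic, with Wh of shape
# (4, 4, 4) and N = 4 nodes):
#   a_input = torch.cat([Wh.repeat_interleave(4, dim=1),
#                        Wh.repeat(1, 4, 1)], dim=2)  # -> (4, 16, 8)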
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
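# Stores only the boolean predicate e > 0; the LeakyReLU negative branch
# (a multiply by the slope constant 4.0) is recomputed inside the fused
# softmax kernels below, so just the cheap mask is kept in memory here.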
@triton.jit
def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
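# Adjacency mask adj > 0. Downstream kernels use it to replace scores of
# non-edges with the large negative fill -8999999815811072.0 (about -9e15)
# before the softmax, so masked entries contribute ~0 probability.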
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (16 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (32 + x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (48 + x0), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp41 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (16 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp46 = tl.load(in_ptr4 + (16 + x0), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr3 + (32 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp52 = tl.load(in_ptr4 + (32 + x0), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr3 + (48 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp58 = tl.load(in_ptr4 + (48 + x0), xmask, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp75 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr5 + (16 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp80 = tl.load(in_ptr6 + (16 + x0), xmask, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr5 + (32 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp86 = tl.load(in_ptr6 + (32 + x0), xmask, eviction_policy='evict_last')
tmp91 = tl.load(in_ptr5 + (48 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp92 = tl.load(in_ptr6 + (48 + x0), xmask, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp109 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr7 + (16 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp114 = tl.load(in_ptr8 + (16 + x0), xmask, eviction_policy='evict_last')
tmp119 = tl.load(in_ptr7 + (32 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp120 = tl.load(in_ptr8 + (32 + x0), xmask, eviction_policy='evict_last')
tmp125 = tl.load(in_ptr7 + (48 + x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp126 = tl.load(in_ptr8 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp42 = tmp41 * tmp3
tmp43 = tl.where(tmp40, tmp41, tmp42)
tmp44 = tl.where(tmp0, tmp43, tmp6)
tmp47 = tmp46 * tmp3
tmp48 = tl.where(tmp45, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp48, tmp6)
tmp50 = triton_helpers.maximum(tmp44, tmp49)
tmp53 = tmp52 * tmp3
tmp54 = tl.where(tmp51, tmp52, tmp53)
tmp55 = tl.where(tmp15, tmp54, tmp6)
tmp56 = triton_helpers.maximum(tmp50, tmp55)
tmp59 = tmp58 * tmp3
tmp60 = tl.where(tmp57, tmp58, tmp59)
tmp61 = tl.where(tmp22, tmp60, tmp6)
tmp62 = triton_helpers.maximum(tmp56, tmp61)
tmp63 = tmp44 - tmp62
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp49 - tmp62
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp55 - tmp62
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tmp71 = tmp61 - tmp62
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp70 + tmp72
tmp76 = tmp75 * tmp3
tmp77 = tl.where(tmp74, tmp75, tmp76)
tmp78 = tl.where(tmp0, tmp77, tmp6)
tmp81 = tmp80 * tmp3
tmp82 = tl.where(tmp79, tmp80, tmp81)
tmp83 = tl.where(tmp8, tmp82, tmp6)
tmp84 = triton_helpers.maximum(tmp78, tmp83)
tmp87 = tmp86 * tmp3
tmp88 = tl.where(tmp85, tmp86, tmp87)
tmp89 = tl.where(tmp15, tmp88, tmp6)
tmp90 = triton_helpers.maximum(tmp84, tmp89)
tmp93 = tmp92 * tmp3
tmp94 = tl.where(tmp91, tmp92, tmp93)
tmp95 = tl.where(tmp22, tmp94, tmp6)
tmp96 = triton_helpers.maximum(tmp90, tmp95)
tmp97 = tmp78 - tmp96
tmp98 = tl_math.exp(tmp97)
tmp99 = tmp83 - tmp96
tmp100 = tl_math.exp(tmp99)
tmp101 = tmp98 + tmp100
tmp102 = tmp89 - tmp96
tmp103 = tl_math.exp(tmp102)
tmp104 = tmp101 + tmp103
tmp105 = tmp95 - tmp96
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp104 + tmp106
tmp110 = tmp109 * tmp3
tmp111 = tl.where(tmp108, tmp109, tmp110)
tmp112 = tl.where(tmp0, tmp111, tmp6)
tmp115 = tmp114 * tmp3
tmp116 = tl.where(tmp113, tmp114, tmp115)
tmp117 = tl.where(tmp8, tmp116, tmp6)
tmp118 = triton_helpers.maximum(tmp112, tmp117)
tmp121 = tmp120 * tmp3
tmp122 = tl.where(tmp119, tmp120, tmp121)
tmp123 = tl.where(tmp15, tmp122, tmp6)
tmp124 = triton_helpers.maximum(tmp118, tmp123)
tmp127 = tmp126 * tmp3
tmp128 = tl.where(tmp125, tmp126, tmp127)
tmp129 = tl.where(tmp22, tmp128, tmp6)
tmp130 = triton_helpers.maximum(tmp124, tmp129)
tmp131 = tmp112 - tmp130
tmp132 = tl_math.exp(tmp131)
tmp133 = tmp117 - tmp130
tmp134 = tl_math.exp(tmp133)
tmp135 = tmp132 + tmp134
tmp136 = tmp123 - tmp130
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp139 = tmp129 - tmp130
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tl.store(out_ptr0 + x2, tmp28, xmask)
tl.store(out_ptr1 + x2, tmp39, xmask)
tl.store(out_ptr2 + x2, tmp62, xmask)
tl.store(out_ptr3 + x2, tmp73, xmask)
tl.store(out_ptr4 + x2, tmp96, xmask)
tl.store(out_ptr5 + x2, tmp107, xmask)
tl.store(out_ptr6 + x2, tmp130, xmask)
tl.store(out_ptr7 + x2, tmp141, xmask)
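# Per-head stable-softmax statistics: for each of the four attention heads,
# the row-wise maximum over the four neighbor scores and the matching sum of
# exp(score - max), all computed in a single launch.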
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_4(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr5 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp14 = tl.load(in_ptr6 + x4, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr7 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr8 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr9 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp24 = tl.load(in_ptr10 + x4, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr11 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr12 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp33 = tl.load(in_ptr13 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp34 = tl.load(in_ptr14 + x4, xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr15 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp41 = tl.load(in_ptr16 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = tmp14 * tmp3
tmp16 = tl.where(tmp13, tmp14, tmp15)
tmp17 = tl.where(tmp0, tmp16, tmp6)
tmp19 = tmp17 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp0, tmp26, tmp6)
tmp29 = tmp27 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp30 / tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp34, tmp35)
tmp37 = tl.where(tmp0, tmp36, tmp6)
tmp39 = tmp37 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp42 = tmp40 / tmp41
tl.store(out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr1 + x3, tmp22, xmask)
tl.store(out_ptr2 + x3, tmp32, xmask)
tl.store(out_ptr3 + x3, tmp42, xmask)
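# Normalization step of the masked softmax: attention = exp(score - amax) /
# denom, consuming the per-row statistics produced by the kernel above, again
# for all four heads at once.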
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
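# Broadcast copy: replicates the 64-element projected feature block (Wh,
# viewed as (4, 4, 4)) across a new leading dimension so the following bmm
# can read a contiguous (16, 4, 4) operand.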
@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp18 &
xmask, other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp30 &
xmask, other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tl.full([1], 16, tl.int64)
tmp42 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp39 &
xmask, other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + x3, tmp52, xmask)
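# Fused epilogue: applies ELU to each head's h_prime (the expm1 branch with
# scale 1.0 matches F.elu with alpha=1) and concatenates the four heads along
# dim 1, producing the (4, 16, 4, 4) output.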
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](buf0, buf1, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0),
primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_gt_2[grid(256)](primals_4, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_5, out=buf10)
del primals_5
buf11 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf10, buf11, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (64, 8), (8, 1), 0),
primals_6, out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_7, out=buf19)
del primals_7
buf20 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf19, buf20, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (64, 8), (8, 1), 0),
primals_8, out=buf21)
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf21, buf22, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_9, out=buf28)
del primals_9
buf29 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf28, buf29, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf29, (64, 8), (8, 1), 0),
primals_10, out=buf30)
buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf30, buf31, 64, XBLOCK=64,
num_warps=1, num_stages=1)
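        # Fused where/LeakyReLU/softmax for all four heads: kernel _3 reduces the max
        # and exp-sum over dim 1 (the softmax dim); kernel _4 computes
        # exp(x - max) / sum, i.e. the adjacency-masked attention weights.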
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf24 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf32 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf33 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(64)](buf4,
buf3, buf2, buf13, buf12, buf22, buf21, buf31, buf30, buf5,
buf6, buf14, buf15, buf23, buf24, buf32, buf33, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf25 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf34 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_leaky_relu_mul_where_4[grid(256)](buf4,
buf3, buf2, buf5, buf6, buf13, buf12, buf14, buf15, buf22,
buf21, buf23, buf24, buf31, buf30, buf32, buf33, buf7, buf16,
buf25, buf34, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf12
del buf14
del buf15
del buf2
del buf21
del buf23
del buf24
del buf30
del buf32
del buf33
del buf5
del buf6
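        # h_prime = attention @ h per head, as batched matmuls over the tiled h buffers.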
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf0, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf10, buf17, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf10
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf16, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18
)
buf26 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf19, buf26, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf19
buf27 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), out=buf27
)
buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf28, buf35, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf28
buf36 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf34, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36
)
buf37 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_6[grid(1024)](buf9, buf18, buf27, buf36, buf37,
1024, XBLOCK=128, num_warps=4, num_stages=1)
return (buf37, buf3, buf4, buf7, buf9, buf13, buf16, buf18, buf22,
buf25, buf27, buf31, buf34, buf36, reinterpret_tensor(buf35, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf29, (8, 64), (1, 8), 0),
reinterpret_tensor(primals_10, (1, 8), (1, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0),
reinterpret_tensor(buf26, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf20, (8, 64), (1, 8), 0), reinterpret_tensor(
primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf17, (16, 4, 4),
(16, 1, 4), 0), reinterpret_tensor(buf11, (8, 64), (1, 8), 0),
reinterpret_tensor(primals_6, (1, 8), (1, 1), 0),
reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf1, (8, 64), (1, 8), 0), reinterpret_tensor(
primals_3, (1, 8), (1, 1), 0))
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.matmul(input, self.W)
N = h.size()[1]
batch_size = h.size(0)
a_input = torch.cat([h.repeat(1, 1, N).view(batch_size, N * N, -1),
h.repeat(1, N, 1)], dim=2).view(batch_size, N, -1, 2 * self.
out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GATNew(nn.Module):
def __init__(self, nfeat, nhid, dropout, alpha, nheads):
super(GATNew, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, input_0, input_1):
primals_2 = self.attention_0.W
primals_3 = self.attention_0.a
primals_5 = self.attention_1.W
primals_6 = self.attention_1.a
primals_7 = self.attention_2.W
primals_8 = self.attention_2.a
primals_9 = self.attention_3.W
primals_10 = self.attention_3.a
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
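# Minimal usage sketch (dropout/alpha values are hypothetical; shapes follow the asserts in call()):
#   model = GATNew(nfeat=4, nhid=4, dropout=0.5, alpha=0.2, nheads=4).cuda()
#   out = model(torch.rand(4, 4, 4, device='cuda'), torch.rand(4, 4, 4, 4, device='cuda'))
#   # out has shape (4, 16, 4, 4): four ELU'd heads concatenated along dim 1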
| daiki-kimura/commonsense-rl | GAT | false | 12,256 | [
"Apache-2.0"
] | 0 | 5513926957b6501ce9cfa46f77f8f2c1c4892fa5 | https://github.com/daiki-kimura/commonsense-rl/tree/5513926957b6501ce9cfa46f77f8f2c1c4892fa5 |
GroupWiseLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/lv/clvhqra465e7rlmwkbu5xr6rfzlyh72yhs7d3zsjnfgsh2neujk4.py
# Topologically Sorted Source Nodes: [mul, x, x_1], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# mul => mul
# x => sum_1
# x_1 => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %primals_3), kwargs = {})
triton_poi_fused_add_mul_sum_0 = async_compile.triton('triton_poi_fused_add_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, x, x_1], Original ATen: [aten.mul, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sum_0.run(primals_1, primals_2, primals_3, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_3
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
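# Fused GroupWiseLinear forward: out[..., c] = sum_k W[0, c, k] * x[..., c, k] + b[0, c],
# with hidden_dim (4) fully unrolled into four load/mul/add steps.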
@triton.jit
def triton_poi_fused_add_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sum_0[grid(64)](primals_1, primals_2,
primals_3, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_3
return buf0, primals_2
class GroupWiseLinearNew(nn.Module):
def __init__(self, num_class, hidden_dim, bias=True):
super().__init__()
self.num_class = num_class
self.hidden_dim = hidden_dim
self.bias = bias
self.W = nn.Parameter(torch.Tensor(1, num_class, hidden_dim))
if bias:
self.b = nn.Parameter(torch.Tensor(1, num_class))
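            # note: forward below reads self.b unconditionally, so this wrapper
            # effectively assumes bias=True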
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.W.size(2))
for i in range(self.num_class):
self.W[0][i].data.uniform_(-stdv, stdv)
if self.bias:
for i in range(self.num_class):
self.b[0][i].data.uniform_(-stdv, stdv)
def forward(self, input_0):
primals_1 = self.W
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ckvic3/query2labels | GroupWiseLinear | false | 1,720 | [
"MIT"
] | 0 | e9c30e1b445be773be397a093fa66aef71d54556 | https://github.com/ckvic3/query2labels/tree/e9c30e1b445be773be397a093fa66aef71d54556 |
ProbabilityLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xq/cxqnkvz6ksfxzcebv4n77pe5u6roargnnmapis6ymq4r6b5krclq.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weight => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%primals_1, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lz/clzpvkxf6ex2lbapm65egvjtcerb3lrxuftxtlzwwccfjna7dphr.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weight => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oz/cozjc4ycw2grlt7fkhcnju5hvypgulnx2brarj6h6lc3jrra4q65.py
# Topologically Sorted Source Nodes: [sum_1, output_1], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# output_1 => div_1
# sum_1 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [-1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %sum_2), kwargs = {})
triton_poi_fused_div_sum_2 = async_compile.triton('triton_poi_fused_div_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
del buf0
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, output_1], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
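# Softmax over dim 0 of the (4, 4) weight, split into the usual two passes:
# kernel 0 subtracts the column max and exponentiates, kernel 1 divides by the column sum.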
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
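# normalize_prob fused: divide each output element by its row (last-dim) sum.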
@triton.jit
def triton_poi_fused_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
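        # weight = softmax(W, dim=0); out = x @ weight^T; out /= out.sum(-1, keepdim=True)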
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](primals_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_sum_2[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf3, primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf2
def normalize_prob(a, dim=-1):
"""Perform 1-norm along the specific dimension."""
return a / a.sum(dim=dim, keepdim=True)
class ProbabilityLinearNew(nn.Linear):
def __init__(self, in_features, out_features, bias=False, norm=True):
assert bias is False, 'Bias regularization for SOFTMAX is not implemented.'
super().__init__(in_features, out_features, bias)
self.norm = norm
def _regulize_parameter(self, p):
return F.softmax(p, dim=0)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| ashutosh1919/neuro-symbolic-sudoku-solver | ProbabilityLinear | false | 14,906 | [
"Apache-2.0"
] | 52 | ecb4274ff66d3b6a86f64584e0a767bf785f107f | https://github.com/ashutosh1919/neuro-symbolic-sudoku-solver/tree/ecb4274ff66d3b6a86f64584e0a767bf785f107f |
Decoder4 | import torch
import torch.nn as nn
class Decoder4(nn.Module):
def __init__(self, model=None, fixed=False):
super(Decoder4, self).__init__()
self.fixed = fixed
self.conv41 = nn.Conv2d(512, 256, 3, 1, 0)
self.conv34 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv33 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv32 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv31 = nn.Conv2d(256, 128, 3, 1, 0)
self.conv22 = nn.Conv2d(128, 128, 3, 1, 0)
self.conv21 = nn.Conv2d(128, 64, 3, 1, 0)
self.conv12 = nn.Conv2d(64, 64, 3, 1, 0)
self.conv11 = nn.Conv2d(64, 3, 3, 1, 0)
self.relu = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.pad = nn.ReflectionPad2d((1, 1, 1, 1))
if model:
self.load_state_dict(torch.load(model, map_location=lambda
storage, location: storage))
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, input):
y = self.relu(self.conv41(self.pad(input)))
y = self.unpool(y)
y = self.relu(self.conv34(self.pad(y)))
y = self.relu(self.conv33(self.pad(y)))
y = self.relu(self.conv32(self.pad(y)))
y = self.relu(self.conv31(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv22(self.pad(y)))
y = self.relu(self.conv21(self.pad(y)))
y = self.unpool(y)
y = self.relu(self.conv12(self.pad(y)))
y = self.relu(self.conv11(self.pad(y)))
return y
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
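# Reflection padding, nearest-neighbor upsampling, and bias+ReLU are fused into the
# pointwise kernels below; the convolutions themselves run through extern_kernels.convolution.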
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
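# Index map for the 2x nearest-neighbor upsample: output index i samples source index int(i * 0.5).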
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 10 % 10
x0 = xindex % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x7 = xindex
tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1
))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0
))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = xindex // 10 % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 18 % 18
x0 = xindex % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x1))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 34 % 34
x0 = xindex % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x1))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
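# Kernel 10 above finalizes the conv11 output (bias + ReLU, in place); the remaining
# *_threshold_backward kernels only record the ReLU masks (activation <= 0) saved for backward.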
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
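        # Decode pipeline: pad -> conv41 -> ReLU -> 2x upsample -> conv34..conv31 ->
        # upsample -> conv22/conv21 -> upsample -> conv12/conv11, with bias+ReLU and
        # upsample indexing fused into the pad kernels.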
buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0,
73728, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1))
buf2 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK
=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid
(102400)](buf2, buf1, primals_3, buf3, 102400, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1))
buf5 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf4
, primals_5, buf5, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1))
buf7 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf6
, primals_7, buf7, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 8, 8), (16384, 64, 8, 1))
buf9 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf8
, primals_9, buf9, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 8, 8), (8192, 64, 8, 1))
buf11 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid
(165888)](buf11, buf10, primals_11, buf12, 165888, XBLOCK=512,
num_warps=8, num_stages=1)
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1))
buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(165888)](
buf13, primals_13, buf14, 165888, XBLOCK=1024, num_warps=4,
num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 16, 16), (16384, 256, 16, 1))
buf16 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf16, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8[grid
(295936)](buf16, buf15, primals_15, buf17, 295936, XBLOCK=512,
num_warps=8, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(295936)](
buf18, primals_17, buf19, 295936, XBLOCK=1024, num_warps=4,
num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 3, 32, 32), (3072, 1024, 32, 1))
buf21 = buf20
del buf20
buf22 = empty_strided_cuda((4, 3, 32, 32), (3072, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_10[grid(12288)](
buf21, primals_19, buf22, 12288, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_19
buf23 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(262144)](
buf18, primals_17, buf23, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf18
del primals_17
buf24 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_12[grid(65536)](
buf15, primals_15, buf24, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf15
del primals_15
buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_13[grid(131072)](
buf13, primals_13, buf25, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf13
del primals_13
buf26 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_14[grid(32768)](
buf10, primals_11, buf26, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf10
del primals_11
buf27 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf8, primals_9, buf27, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf8
del primals_9
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf6, primals_7, buf28, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf29 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf4, primals_5, buf29, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf4
del primals_5
buf30 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_16[grid(16384)](
buf1, primals_3, buf30, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del primals_3
return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10,
primals_12, primals_14, primals_16, primals_18, buf0, buf2, buf3,
buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf17, buf19, buf22,
buf23, buf24, buf25, buf26, buf27, buf28, buf29, buf30)
class Decoder4New(nn.Module):
def __init__(self, model=None, fixed=False):
super(Decoder4New, self).__init__()
self.fixed = fixed
self.conv41 = nn.Conv2d(512, 256, 3, 1, 0)
self.conv34 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv33 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv32 = nn.Conv2d(256, 256, 3, 1, 0)
self.conv31 = nn.Conv2d(256, 128, 3, 1, 0)
self.conv22 = nn.Conv2d(128, 128, 3, 1, 0)
self.conv21 = nn.Conv2d(128, 64, 3, 1, 0)
self.conv12 = nn.Conv2d(64, 64, 3, 1, 0)
self.conv11 = nn.Conv2d(64, 3, 3, 1, 0)
self.relu = nn.ReLU(inplace=True)
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
self.pad = nn.ReflectionPad2d((1, 1, 1, 1))
if model:
self.load_state_dict(torch.load(model, map_location=lambda
storage, location: storage))
if fixed:
for param in self.parameters():
param.requires_grad = False
def forward(self, input_0):
primals_2 = self.conv41.weight
primals_3 = self.conv41.bias
primals_4 = self.conv34.weight
primals_5 = self.conv34.bias
primals_6 = self.conv33.weight
primals_7 = self.conv33.bias
primals_8 = self.conv32.weight
primals_9 = self.conv32.bias
primals_10 = self.conv31.weight
primals_11 = self.conv31.bias
primals_12 = self.conv22.weight
primals_13 = self.conv22.bias
primals_14 = self.conv21.weight
primals_15 = self.conv21.bias
primals_16 = self.conv12.weight
primals_17 = self.conv12.bias
primals_18 = self.conv11.weight
primals_19 = self.conv11.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0]
| EndyWon/Texture-Reformer | Decoder4 | false | 8,165 | [
"MIT"
] | 11 | f84f95accb3574c7b759a7f03c0b0b4e150314b5 | https://github.com/EndyWon/Texture-Reformer/tree/f84f95accb3574c7b759a7f03c0b0b4e150314b5 |
PixelNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_3/inductor_cache/ss/cssyjjl3mw4nt7jmyffj4insp57ypdwdrjatqjapjpniogymqic5.py
# Topologically Sorted Source Nodes: [pow_1, mean, add, sqrt, truediv], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# pow_1 => pow_1
# sqrt => sqrt
# truediv => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {})
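# i.e. out = x / sqrt(mean(x ** 2, dim=1, keepdim=True) + 1e-08)  (pixel norm)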
triton_poi_fused_add_div_mean_pow_sqrt_0 = async_compile.triton('triton_poi_fused_add_div_mean_pow_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_pow_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, mean, add, sqrt, truediv], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_sqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_sqrt_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class PixelNormNew(nn.Module):
def __init__(self):
super(PixelNormNew, self).__init__()
self.epsilon = 1e-08
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
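# Hedged equivalence check (illustrative, not from the original entry): the fused
# kernel should reproduce the eager pixel-norm formula on the traced (4, 4, 4, 4)
# contiguous CUDA input it was specialized for.
if __name__ == "__main__":
    _x = torch.rand(4, 4, 4, 4, device="cuda")
    _ref = _x / torch.sqrt((_x * _x).mean(dim=1, keepdim=True) + 1e-08)
    torch.testing.assert_close(PixelNormNew()(_x), _ref)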
| AjaybirRandhawa/Face-Generator | PixelNorm | false | 18,399 | [
"Apache-2.0"
] | 2 | 9cac0822b6e6337c3599e949154ce44eeae5746b | https://github.com/AjaybirRandhawa/Face-Generator/tree/9cac0822b6e6337c3599e949154ce44eeae5746b |
LinearPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3u/c3ubhztv6npzgtnviydzbdg2mikvp6gcelukqxtrq2fawmzbasgw.py
# Topologically Sorted Source Nodes: [sum_input, sum_input_1, linear_weight, weighted_value, sum_2], Original ATen: [aten.sum, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# linear_weight => div
# sum_2 => sum_2
# sum_input => sum_1
# sum_input_1 => add
# weighted_value => mul
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [-1, -2], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-07), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %div), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1, -2], True), kwargs = {})
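# i.e. out = (x * x / (x.sum((-2, -1), keepdim=True) + 1e-07)).sum((-2, -1), keepdim=True)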
triton_per_fused_add_div_mul_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1e-07
tmp6 = tmp4 + tmp5
tmp7 = tmp0 / tmp6
tmp8 = tmp0 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(in_out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sum_input, sum_input_1, linear_weight, weighted_value, sum_2], Original ATen: [aten.sum, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_sum_0.run(buf1, arg0_1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1e-07
tmp6 = tmp4 + tmp5
tmp7 = tmp0 / tmp6
tmp8 = tmp0 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(in_out_ptr0 + x0, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_sum_0[grid(16)](buf1, arg0_1, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class LinearPoolNew(nn.Module):
def __init__(self):
super(LinearPoolNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
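# Hedged equivalence check (illustrative): linear-softmax pooling weights each
# spatial cell by its share of the spatial sum, then re-sums; eps avoids div-by-zero.
if __name__ == "__main__":
    _x = torch.rand(4, 4, 4, 4, device="cuda")
    _w = _x / (_x.sum(dim=(-2, -1), keepdim=True) + 1e-07)
    torch.testing.assert_close(LinearPoolNew()(_x), (_x * _w).sum(dim=(-2, -1), keepdim=True))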
| iampartho/EEE426 | LinearPool | false | 3,636 | [
"Apache-2.0"
] | 0 | a706660c0efcd4adea44d54c57a34bcaa4439ec1 | https://github.com/iampartho/EEE426/tree/a706660c0efcd4adea44d54c57a34bcaa4439ec1 |
Net | import torch
from torch import nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, 10)
        self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (512, 784), (784, 1))
assert_size_stride(primals_3, (512,), (1,))
assert_size_stride(primals_4, (512, 512), (512, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (10, 512), (512, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
512), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(2048)](buf1, primals_3, 2048, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 512), (
1, 512), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(2048)](buf3, primals_5, 2048, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(512, 10), (1, 512), 0), alpha=1, beta=1, out=buf4)
del primals_7
return buf4, primals_1, buf1, buf3, primals_6, primals_4
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.fc1 = nn.Linear(28 * 28, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, 10)
        self.dropout = nn.Dropout(0.2)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
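# Hedged note + usage sketch: the compiled graph contains no dropout op, which is
# consistent with eval-mode tracing where Dropout(0.2) acts as the identity; the
# input must match the traced (4, 784) shape asserted in `call`.
if __name__ == "__main__":
    _net = NetNew().cuda()
    _logits = _net(torch.rand(4, 784, device="cuda"))  # -> (4, 10) class scores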
| liguodongIOT/nlp-app-samples | Net | false | 7,086 | [
"Apache-2.0"
] | 1 | e0cc747e88c7b5c701b5099462d2dd6277c23381 | https://github.com/liguodongIOT/nlp-app-samples/tree/e0cc747e88c7b5c701b5099462d2dd6277c23381 |
AttBlockV2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/ak/cakpca4eo6izghuc2gyprh5fzpktzalyrpynoedxva3limqncjzp.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/fk/cfkeshoixhxhvrzqmcrge6zi44aiq5hxgsv45ies4htjwx422bia.py
# Topologically Sorted Source Nodes: [tanh, norm_att], Original ATen: [aten.tanh, aten._softmax]
# Source node to ATen node mapping:
# norm_att => amax, exp, sub
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution,), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%tanh, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%tanh, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_tanh_1 = async_compile.triton('triton_poi_fused__softmax_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = libdevice.tanh(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp5 = libdevice.tanh(tmp4)
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = libdevice.tanh(tmp7)
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = libdevice.tanh(tmp10)
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tl_math.exp(tmp13)
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/xl/cxls6dl5dz3ua4ilno7rjcfd6m7p4ydnd3mzfaq2cepnph6e2y7h.py
# Topologically Sorted Source Nodes: [norm_att], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# norm_att => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/t2/ct2zbhdobwyl2ppd2kmxqrd7ifmna5qulpdko6cfgpjgpfprjkj2.py
# Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# x => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %convolution_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {})
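# i.e. x = (norm_att * cla).sum(dim=2), the attention-weighted clipwise output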
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, norm_att], Original ATen: [aten.tanh, aten._softmax]
triton_poi_fused__softmax_tanh_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_att], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
del buf2
# Topologically Sorted Source Nodes: [cla], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(primals_3, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [cla], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf5, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf3, buf5, buf6, 16, grid=grid(16), stream=stream0)
return (buf6, buf3, buf5, primals_1, primals_3, primals_4, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = libdevice.tanh(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp5 = libdevice.tanh(tmp4)
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = libdevice.tanh(tmp7)
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = libdevice.tanh(tmp10)
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tl_math.exp(tmp13)
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_tanh_1[grid(64)](buf1, buf2, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
buf4 = extern_kernels.convolution(primals_3, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_0[grid(64)](buf5, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_sum_3[grid(16)](buf3, buf5, buf6, 16, XBLOCK=
16, num_warps=1, num_stages=1)
return buf6, buf3, buf5, primals_1, primals_3, primals_4, buf1, buf3, buf5
def init_layer(layer):
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.0)
class AttBlockV2New(nn.Module):
def __init__(self, in_features: 'int', out_features: 'int', activation=
'linear'):
super().__init__()
self.activation = activation
self.att = nn.Conv1d(in_channels=in_features, out_channels=
out_features, kernel_size=1, stride=1, padding=0, bias=True)
self.cla = nn.Conv1d(in_channels=in_features, out_channels=
out_features, kernel_size=1, stride=1, padding=0, bias=True)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
def forward(self, input_0):
primals_1 = self.att.weight
primals_2 = self.att.bias
primals_4 = self.cla.weight
primals_5 = self.cla.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
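# Hedged usage sketch (illustrative): the compiled path never applies a sigmoid to
# `cla`, so it corresponds to the default activation='linear' branch; inputs must
# match the traced (4, 4, 4) shape of the 1x1 Conv1d attention/classifier heads.
if __name__ == "__main__":
    _blk = AttBlockV2New(in_features=4, out_features=4).cuda()
    _x, _att, _cla = _blk(torch.rand(4, 4, 4, device="cuda"))
    # _x: (4, 4) pooled output; _att: (4, 4, 4) softmax over time (sums to 1 on dim=-1)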
| EMUNES/Auto-Subtitle-File-Generation | AttBlockV2 | false | 8,060 | [
"Apache-2.0"
] | 33 | 535a6351f450b1970da50bbbf4cc6d2f442ec335 | https://github.com/EMUNES/Auto-Subtitle-File-Generation/tree/535a6351f450b1970da50bbbf4cc6d2f442ec335 |
Spatial_Attention_layer | import torch
from torch import nn
import torch.nn.functional as F
class Spatial_Attention_layer(nn.Module):
"""
compute spatial attention scores
"""
def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps):
super(Spatial_Attention_layer, self).__init__()
self.W1 = nn.Parameter(torch.FloatTensor(num_of_timesteps))
self.W2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_timesteps)
)
self.W3 = nn.Parameter(torch.FloatTensor(in_channels))
self.bs = nn.Parameter(torch.FloatTensor(1, num_of_vertices,
num_of_vertices))
self.Vs = nn.Parameter(torch.FloatTensor(num_of_vertices,
num_of_vertices))
def forward(self, x):
"""
:param x: (batch_size, N, F_in, T)
:return: (B,N,N)
"""
lhs = torch.matmul(torch.matmul(x, self.W1), self.W2)
rhs = torch.matmul(self.W3, x).transpose(-1, -2)
product = torch.matmul(lhs, rhs)
S = torch.matmul(self.Vs, torch.sigmoid(product + self.bs))
S_normalized = F.softmax(S, dim=1)
return S_normalized
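        # Hedged shape walk-through (B=batch, N=vertices, F=in_channels, T=timesteps):
        #   x @ W1            : (B,N,F,T) @ (T,)     -> (B,N,F)
        #   (x @ W1) @ W2     : (B,N,F)   @ (F,T)    -> (B,N,T)  = lhs
        #   (W3 @ x).mT       : (F,) @ (B,N,F,T)     -> (B,T,N)  = rhs
        #   lhs @ rhs         : (B,N,T)   @ (B,T,N)  -> (B,N,N)  = product
        #   Vs @ sigmoid(product + bs), softmax(dim=1) -> (B,N,N)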
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'DEVICE': 4, 'in_channels': 4, 'num_of_vertices': 4,
'num_of_timesteps': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x0, tmp18, xmask)
@triton.jit
def triton_poi_fused_mv_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 * (x0 // 4) + x0 % 4), xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (4 + 16 * (x0 // 4) + x0 % 4), xmask)
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (8 + 16 * (x0 // 4) + x0 % 4), xmask)
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (12 + 16 * (x0 // 4) + x0 % 4), xmask)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tmp17 = tmp14 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x0, tmp18, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp3, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_mv_0[grid(64)](primals_2, primals_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
primals_3, out=buf1)
buf2 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused_mv_1[grid(64)](primals_2, primals_4, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, primals_6, buf4, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused__softmax_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_4[grid(16, 4)](buf6, buf7, 16, 4, XBLOCK=
4, YBLOCK=16, num_warps=1, num_stages=1)
del buf6
return buf7, primals_2, primals_6, buf3, reinterpret_tensor(buf4, (16,
4), (4, 1), 0), buf7, primals_5, reinterpret_tensor(buf1, (4, 4, 4),
(16, 1, 4), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (4, 16), (1, 4), 0), reinterpret_tensor(
primals_3, (4, 4), (1, 4), 0)
class Spatial_Attention_layerNew(nn.Module):
"""
compute spatial attention scores
"""
def __init__(self, DEVICE, in_channels, num_of_vertices, num_of_timesteps):
super(Spatial_Attention_layerNew, self).__init__()
self.W1 = nn.Parameter(torch.FloatTensor(num_of_timesteps))
self.W2 = nn.Parameter(torch.FloatTensor(in_channels, num_of_timesteps)
)
self.W3 = nn.Parameter(torch.FloatTensor(in_channels))
self.bs = nn.Parameter(torch.FloatTensor(1, num_of_vertices,
num_of_vertices))
self.Vs = nn.Parameter(torch.FloatTensor(num_of_vertices,
num_of_vertices))
def forward(self, input_0):
primals_1 = self.W1
primals_3 = self.W2
primals_4 = self.W3
primals_6 = self.bs
primals_5 = self.Vs
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| abcdefg-dev-dd/asxdcvfg | Spatial_Attention_layer | false | 6,056 | [
"Apache-2.0"
] | 1 | 83421d4a133810968d6e04b256a9312895452941 | https://github.com/abcdefg-dev-dd/asxdcvfg/tree/83421d4a133810968d6e04b256a9312895452941 |
UGRNNLRCell | import torch
import torch.nn as nn
import torch.onnx
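# NOTE: `utils` used in RNNCell.get_model_size/sparsify below is the sparsity helper
# module from the original repo (assumption: edgeml_pytorch.utils, providing
# countNNZ/hardThreshold/supportBasedThreshold); it is not imported in this snippet.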
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
            raise ValueError(
                'nonlinearity is either a callable or a value in ' +
                "['tanh', 'sigmoid', 'relu', 'quantTanh', " +
                "'quantSigm', 'quantSigm4']")
return nonlinearity(A)
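# Hedged examples of the quantized surrogates (values follow the formulas above):
#   gen_nonlinearity(torch.tensor([-2., 0., 2.]), 'quantTanh')  -> tensor([-1., 0., 1.])
#   gen_nonlinearity(torch.tensor([-2., 0., 2.]), 'quantSigm')  -> tensor([0.0, 0.5, 1.0])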
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
        Returns the target (post-sparsification) model size in bytes,
        counting nonzero entries at 4 bytes each (float32).
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
        for i in range(0, endW):
            totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
        for i in range(endW, endU):
            totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
        for i in range(endU, len(mats)):
            totalnnz += utils.countNNZ(mats[i].cpu(), False)
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class UGRNNLRCell(RNNCell):
"""
UGRNN LR Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix
(creates 3 matrices if not None else creates 2 matrices)
uRank = rank of U matrix
(creates 3 matrices if not None else creates 2 matrices)
    The UGRNN architecture and compression techniques are described in
    the UGRNN paper (LINK)
Basic architecture is like:
z_t = gate_nl(W1x_t + U1h_{t-1} + B_g)
h_t^ = update_nl(W1x_t + U1h_{t-1} + B_h)
h_t = z_t*h_{t-1} + (1-z_t)*h_t^
Wi and Ui can further parameterised into low rank version by
Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, name='UGRNNLR'):
super(UGRNNLRCell, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank,
wSparsity, uSparsity)
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
else:
self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
else:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self._device = self.bias_update.device
@property
def name(self):
return self._name
@property
def cellType(self):
return 'UGRNNLR'
def forward(self, input, state):
if self._wRank is None:
wComp1 = torch.matmul(input, self.W1)
wComp2 = torch.matmul(input, self.W2)
else:
wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1)
wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2)
if self._uRank is None:
uComp1 = torch.matmul(state, self.U1)
uComp2 = torch.matmul(state, self.U2)
else:
uComp1 = torch.matmul(torch.matmul(state, self.U), self.U1)
uComp2 = torch.matmul(torch.matmul(state, self.U), self.U2)
pre_comp1 = wComp1 + uComp1
pre_comp2 = wComp2 + uComp2
z = gen_nonlinearity(pre_comp1 + self.bias_gate, self.
_gate_nonlinearity)
c = gen_nonlinearity(pre_comp2 + self.bias_update, self.
_update_nonlinearity)
new_h = z * state + (1.0 - z) * c
return new_h
def getVars(self):
Vars = []
if self._num_W_matrices == 2:
Vars.extend([self.W1, self.W2])
else:
Vars.extend([self.W, self.W1, self.W2])
if self._num_U_matrices == 2:
Vars.extend([self.U1, self.U2])
else:
Vars.extend([self.U, self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
return Vars
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
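# Usage sketch (illustrative only; the sizes, ranks, and helper name below
# are assumptions, not part of the original module). One recurrent step maps
# a (batch, input_size) input and a (batch, hidden_size) state to a new
# (batch, hidden_size) state; the low-rank factors route W and U through rank 8.
def _example_ugrnn_step():
    cell = UGRNNLRCell(input_size=32, hidden_size=64, wRank=8, uRank=8)
    x = torch.randn(16, 32)   # one time step of inputs
    h = torch.zeros(16, 64)   # initial hidden state
    return cell(x, h)         # -> tensor of shape (16, 64)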
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp5
tmp16 = tmp15 * tmp11
tmp17 = tmp13 + tmp16
tl.store(in_out_ptr0 + x2, tmp5, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
primals_6, out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf4, buf5,
buf2, primals_7, buf3, primals_8, primals_5, buf6, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf2
del buf3
del primals_7
del primals_8
return buf6, primals_5, buf4, buf5, reinterpret_tensor(primals_2, (4,
64), (1, 4), 0)
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
            raise ValueError(
                'nonlinearity is either a callable or a value in ' +
                "['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']")
return nonlinearity(A)
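# Equivalence note: quantTanh is a hard clamp of A to [-1, 1], i.e.
# torch.clamp(A, -1.0, 1.0), and quantSigm rescales A to [0, 1] via
# (A + 1) / 2 before the same clamp -- cheap piecewise-linear stand-ins
# for tanh/sigmoid that behave well under fixed-point quantization.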
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
    def get_model_size(self):
        """
        Estimates the model size in bytes: counts the nonzeros that
        survive thresholding and assumes 4 bytes (float32) per value.
        """
        mats = self.getVars()
        endW = self._num_W_matrices
        endU = endW + self._num_U_matrices
        totalnnz = 2
        for i in range(0, endW):
            totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
        for i in range(endW, endU):
            totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
        for i in range(endU, len(mats)):
            totalnnz += utils.countNNZ(mats[i].cpu(), False)
        return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
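# A typical hard-thresholding training schedule (a sketch, not prescribed by
# this file): train dense for a few epochs, then call cell.sparsify() after
# each optimizer step to re-project onto the sparsity budget, and once the
# support has stabilized switch to cell.sparsifyWithSupport(), which keeps
# only entries on the support saved by copy_previous_UW().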
class UGRNNLRCellNew(RNNCell):
"""
UGRNN LR Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
    gate_nonlinearity = nonlinearity for the gate; can be chosen from
    [tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix
(creates 3 matrices if not None else creates 2 matrices)
uRank = rank of U matrix
(creates 3 matrices if not None else creates 2 matrices)
UGRNN architecture and compression techniques are found in
UGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(W1x_t + U1h_{t-1} + B_g)
    h_t^ = update_nl(W2x_t + U2h_{t-1} + B_h)
    h_t = z_t*h_{t-1} + (1-z_t)*h_t^
    Wi and Ui can be further parameterised into low-rank versions by
Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, name='UGRNNLR'):
super(UGRNNLRCellNew, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank,
wSparsity, uSparsity)
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
else:
self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
else:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self._device = self.bias_update.device
@property
def name(self):
return self._name
@property
def cellType(self):
return 'UGRNNLR'
def getVars(self):
Vars = []
if self._num_W_matrices == 2:
Vars.extend([self.W1, self.W2])
else:
Vars.extend([self.W, self.W1, self.W2])
if self._num_U_matrices == 2:
Vars.extend([self.U1, self.U2])
else:
Vars.extend([self.U, self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
return Vars
def forward(self, input_0, input_1):
primals_1 = self.W1
primals_3 = self.W2
primals_4 = self.U1
primals_6 = self.U2
primals_7 = self.bias_gate
primals_8 = self.bias_update
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| adityakusupati/EdgeML | UGRNNLRCell | false | 3,029 | [
"MIT"
] | 0 | 65933a6fdfc38945f4311043a62e120784b2b0bf | https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf |
CosineSimilarityLoss | import torch
from torch import nn
class CosineSimilarityLoss(nn.Module):
def __init__(self):
super(CosineSimilarityLoss, self).__init__()
def forward(self, x1, x2):
return 0.5 - 0.5 * torch.cosine_similarity(x1, x2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
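# Quick check (illustrative helper; the function name is an assumption):
# torch.cosine_similarity reduces over dim=1 by default, so the (4, 4, 4, 4)
# inputs above yield a (4, 4, 4) loss with values in [0, 1] -- 0 for
# parallel vectors, 1 for anti-parallel ones.
def _example_cosine_similarity_loss():
    loss_fn = CosineSimilarityLoss()
    x1, x2 = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    return loss_fn(x1, x2)  # -> tensor of shape (4, 4, 4)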
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x3, xmask)
tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tl.store(out_ptr0 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused_mul_rsub_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp7 - tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)](
arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_rsub_sum_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
return buf1,
class CosineSimilarityLossNew(nn.Module):
def __init__(self):
super(CosineSimilarityLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| DunZhang/KnowledgeDistillation | CosineSimilarityLoss | false | 8,006 | [
"MIT"
] | 31 | 47a9dd0f51021001b53e3a76c9347eb3131f1f72 | https://github.com/DunZhang/KnowledgeDistillation/tree/47a9dd0f51021001b53e3a76c9347eb3131f1f72 |
FeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/4g/c4guhk7x6skkidedvs2gxz2kcu6gb76l3ig5crjjvjtzvnjlhlte.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
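# Summary of the fused kernel above: it adds the bias to the matmul result
# in place, applies ReLU, and additionally stores the boolean mask
# (relu <= 0) that aten.threshold_backward uses to zero upstream gradients
# during the backward pass.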
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf3, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
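# The view gymnastics above flatten all leading dimensions so the affine map
# applies over the last axis only: (d0, ..., dk, in) -> (prod(d), in) ->
# (d0, ..., dk, out). contiguous() makes the flattening view legal even for
# non-contiguous inputs.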
class FeedForwardNew(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.linear1 = Linear(d_model, d_hidden)
self.linear2 = Linear(d_hidden, d_model)
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| MichiganCOG/Video-Grounding | FeedForward | false | 8,550 | [
"MIT"
] | 41 | 3e0ec0b69578a59be583911590354fe77d357cab | https://github.com/MichiganCOG/Video-Grounding/tree/3e0ec0b69578a59be583911590354fe77d357cab |
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# a => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cp/ccp5m5apf7ka2skqyfxhf2df54c52qocprpycry7jrzoptyjvbti.py
# Topologically Sorted Source Nodes: [tanh, mul], Original ATen: [aten.tanh, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 4), kwargs = {})
triton_poi_fused_mul_tanh_1 = async_compile.triton('triton_poi_fused_mul_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 16384, grid=grid(16384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 16384, grid=grid(16384), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, mul], Original ATen: [aten.tanh, aten.mul]
triton_poi_fused_mul_tanh_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf4, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), buf4, primals_6, buf6, primals_4, buf7
class ActorNew(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(ActorNew, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, action_dim)
self.max_action = max_action
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
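# Usage sketch (shapes follow the test harness; the compiled call() path
# assumes CUDA tensors). The final layer is squashed through tanh and scaled
# by max_action, so every action component lands in [-4, 4] here:
#   actor = ActorNew(state_dim=4, action_dim=4, max_action=4).cuda()
#   actions = actor(torch.rand(4, 4, 4, 4, device='cuda'))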
| ChristianLin0420/DeepRL | Actor | false | 2,105 | [
"MIT"
] | 0 | 143a9bfebd264229d9d26fcdc070065225774e04 | https://github.com/ChristianLin0420/DeepRL/tree/143a9bfebd264229d9d26fcdc070065225774e04 |
GRUStep | import torch
import torch.nn as nn
class GRUStep(nn.Module):
def __init__(self, hidden_size, input_size):
super(GRUStep, self).__init__()
"""GRU module"""
self.linear_z = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_r = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_t = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
def forward(self, h_state, input_):
z = torch.sigmoid(self.linear_z(torch.cat([h_state, input_], -1)))
r = torch.sigmoid(self.linear_r(torch.cat([h_state, input_], -1)))
t = torch.tanh(self.linear_t(torch.cat([r * h_state, input_], -1)))
h_state = (1 - z) * h_state + z * t
return h_state
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'input_size': 4}]
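# One gated update step (illustrative helper; shapes follow the test inputs):
# r gates the state fed into the candidate t, and z interpolates the output
# as (1 - z) * h_state + z * t.
def _example_gru_step():
    gru = GRUStep(hidden_size=4, input_size=4)
    h_state, x = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    return gru(h_state, x)  # -> same shape as h_state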
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp4, tmp10, tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp3 * tmp4
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp5 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_cat_1[grid(512)](buf2, primals_1, primals_2, buf3,
512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2[grid(256)](buf1,
primals_1, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf5, primals_1, reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf1, buf2, reinterpret_tensor(buf3, (64, 8), (8, 1), 0
), buf4, primals_5
class GRUStepNew(nn.Module):
def __init__(self, hidden_size, input_size):
super(GRUStepNew, self).__init__()
"""GRU module"""
self.linear_z = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_r = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_t = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
def forward(self, input_0, input_1):
primals_3 = self.linear_z.weight
primals_4 = self.linear_r.weight
primals_5 = self.linear_t.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| siyangZhao/BAMnet | GRUStep | false | 16,454 | [
"Apache-2.0"
] | 170 | 4c6222610c120a4a114daf40938219ea0ca57dc6 | https://github.com/siyangZhao/BAMnet/tree/4c6222610c120a4a114daf40938219ea0ca57dc6 |
CosineLoss | import torch
import torch.nn as nn
class CosineLoss(nn.Module):
cos = nn.CosineSimilarity(dim=2, eps=1e-06)
def forward(self, pred, target, mask):
pred = torch.mul(pred, mask.unsqueeze(2))
return (1.0 - self.cos(pred, target)).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
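# Masked cosine loss sketch (illustrative helper): mask.unsqueeze(2)
# broadcasts over the feature axis, zeroing masked positions in pred before
# the dim=2 cosine; the result is averaged into a scalar.
def _example_masked_cosine_loss():
    loss_fn = CosineLoss()
    pred, target, mask = (torch.rand(4, 4, 4, 4) for _ in range(3))
    return loss_fn(pred, target, mask)  # -> scalar tensor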
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_clamp_min_div_linalg_vector_norm_mean_mul_rsub_sum_0(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16 % 4
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + r3, None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr2 + (r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = 1e-06
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = tmp2 / tmp18
tmp21 = tmp20 * tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp29 = tmp28 * tmp28
tmp30 = tmp27 + tmp29
tmp31 = libdevice.sqrt(tmp30)
tmp32 = triton_helpers.maximum(tmp31, tmp17)
tmp33 = tmp20 / tmp32
tmp34 = tmp19 * tmp33
tmp35 = tmp5 / tmp18
tmp36 = tmp22 / tmp32
tmp37 = tmp35 * tmp36
tmp38 = tmp34 + tmp37
tmp39 = tmp9 / tmp18
tmp40 = tmp25 / tmp32
tmp41 = tmp39 * tmp40
tmp42 = tmp38 + tmp41
tmp43 = tmp13 / tmp18
tmp44 = tmp28 / tmp32
tmp45 = tmp43 * tmp44
tmp46 = tmp42 + tmp45
tmp47 = 1.0
tmp48 = tmp47 - tmp46
tmp49 = tl.broadcast_to(tmp48, [RBLOCK])
tmp51 = triton_helpers.promote_to_tensor(tl.sum(tmp49, 0))
tmp52 = 256.0
tmp53 = tmp51 / tmp52
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp53, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_clamp_min_div_linalg_vector_norm_mean_mul_rsub_sum_0[
grid(1)](buf3, arg1_1, arg0_1, arg2_1, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class CosineLossNew(nn.Module):
cos = nn.CosineSimilarity(dim=2, eps=1e-06)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| vegetablejuiceftw/soft-pointer-networks | CosineLoss | false | 11,074 | [
"MIT"
] | 0 | 9705d9688b6b69db3948172771df4c367165c948 | https://github.com/vegetablejuiceftw/soft-pointer-networks/tree/9705d9688b6b69db3948172771df4c367165c948 |
NeighborNormLayer | import torch
import torch.nn as nn
class NeighborNormLayer(nn.Module):
"""Normalization layer that divides the output of a
preceding layer by the number of neighbor features.
Unlike the SimpleNormLayer, this layer allows for
dynamically changing number of neighbors during training.
"""
def __init__(self):
super(NeighborNormLayer, self).__init__()
def forward(self, input_features, n_neighbors):
"""Computes normalized output
Parameters
----------
input_features: torch.Tensor
            Input tensor of features of shape
(n_frames, n_beads, n_feats)
n_neighbors: int
the number of neighbors
Returns
-------
normalized_features: torch.Tensor
Normalized input features
"""
return input_features / n_neighbors
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
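# Note: the docstring types n_neighbors as int, but the elementwise division
# equally accepts any broadcastable tensor, which is exactly what the test
# inputs above pass ((4, 4, 4, 4) divided by (4, 4, 4, 4)).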
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class NeighborNormLayerNew(nn.Module):
"""Normalization layer that divides the output of a
preceding layer by the number of neighbor features.
Unlike the SimpleNormLayer, this layer allows for
dynamically changing number of neighbors during training.
"""
def __init__(self):
super(NeighborNormLayerNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| bokutotu/cgnet | NeighborNormLayer | false | 1,563 | [
"BSD-3-Clause"
] | 0 | a35170001d969d51548dd01522b1ab93e43741b4 | https://github.com/bokutotu/cgnet/tree/a35170001d969d51548dd01522b1ab93e43741b4 |
Mlp | import torch
import torch.nn as nn
class Conv1d(nn.Module):
def __init__(self, nf, nx, stdev=0.02):
super().__init__()
self.nf = nf
self.nx = nx
self.stdev = stdev
self.w = nn.Parameter(torch.normal(size=[1, self.nx, self.nf], mean
=0.0, std=self.stdev))
self.b = nn.Parameter(torch.zeros([self.nf]))
def forward(self, x: 'torch.Tensor'):
shape = x.size()
start, nx = shape[:-1], shape[-1]
return torch.reshape(torch.matmul(torch.reshape(x, [-1, nx]), torch
.reshape(self.w, [-1, self.nf])) + self.b, start + (self.nf,))
class Mlp(nn.Module):
def __init__(self, input_dim, proj_dim):
super().__init__()
self.input_dim = input_dim
self.proj_dim = proj_dim
self.conv_fc = Conv1d(self.proj_dim, self.input_dim)
self.conv_proj = Conv1d(self.input_dim, self.proj_dim)
def forward(self, x):
h = nn.functional.gelu(self.conv_fc(x))
return self.conv_proj(h)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'proj_dim': 4}]
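# Naming note: despite the name, Conv1d above is a GPT-2-style "Conv1D" --
# a dense projection y = x @ W + b over the last axis, with no convolution
# kernel involved. Mlp is then the familiar fc -> GELU -> proj block.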
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (4, 1), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, primals_3, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_3, buf0, reinterpret_tensor(buf1, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
class Conv1d(nn.Module):
def __init__(self, nf, nx, stdev=0.02):
super().__init__()
self.nf = nf
self.nx = nx
self.stdev = stdev
self.w = nn.Parameter(torch.normal(size=[1, self.nx, self.nf], mean
=0.0, std=self.stdev))
self.b = nn.Parameter(torch.zeros([self.nf]))
def forward(self, x: 'torch.Tensor'):
shape = x.size()
start, nx = shape[:-1], shape[-1]
return torch.reshape(torch.matmul(torch.reshape(x, [-1, nx]), torch
.reshape(self.w, [-1, self.nf])) + self.b, start + (self.nf,))
class MlpNew(nn.Module):
def __init__(self, input_dim, proj_dim):
super().__init__()
self.input_dim = input_dim
self.proj_dim = proj_dim
self.conv_fc = Conv1d(self.proj_dim, self.input_dim)
self.conv_proj = Conv1d(self.input_dim, self.proj_dim)
def forward(self, input_0):
primals_2 = self.conv_fc.w
primals_3 = self.conv_fc.b
primals_4 = self.conv_proj.w
primals_5 = self.conv_proj.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Aalanli/MusicGeneration | Mlp | false | 9 | [
"MIT"
] | 0 | 7d268322d692013d8ac6e70be31741cea519fa28 | https://github.com/Aalanli/MusicGeneration/tree/7d268322d692013d8ac6e70be31741cea519fa28 |
LinearEmbedding | import math
import torch
import torch.utils.data
import torch.nn as nn
class LinearEmbedding(nn.Module):
def __init__(self, inp_size, d_model):
super(LinearEmbedding, self).__init__()
self.lut = nn.Linear(inp_size, d_model)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inp_size': 4, 'd_model': 4}]
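# The sqrt(d_model) factor follows the Transformer embedding convention of
# Vaswani et al.; with d_model=4 it equals 2.0 exactly, which is why the
# fused Triton kernel below hard-codes the multiplier tmp3 = 2.0.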
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
    tmp3 = 2.0  # math.sqrt(d_model) for d_model=4, folded in at compile time
    tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class LinearEmbeddingNew(nn.Module):
def __init__(self, inp_size, d_model):
super(LinearEmbeddingNew, self).__init__()
self.lut = nn.Linear(inp_size, d_model)
self.d_model = d_model
def forward(self, input_0):
primals_1 = self.lut.weight
primals_2 = self.lut.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
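# Editor's sketch (not in the captured source): quick shape check, assuming a
# CUDA device and the (4, 4, 4, 4) input asserted in call().
if __name__ == '__main__':
    emb = LinearEmbeddingNew(inp_size=4, d_model=4).cuda()
    y = emb(torch.rand([4, 4, 4, 4], device='cuda'))
    print(y.shape)  # expected: torch.Size([4, 4, 4, 4])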
| flyslowly/Trajectory-Transformer | LinearEmbedding | false | 10,073 | [
"MIT"
] | 0 | 8a5772e67366854155eb3f9a0ebff08c3e9f9186 | https://github.com/flyslowly/Trajectory-Transformer/tree/8a5772e67366854155eb3f9a0ebff08c3e9f9186 |
ConvSig | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/6q/c6qtcqlhzp2spzvdv6knpwm32alhfras7qga7rybepicf6poy4sy.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, 256, grid=grid(256), stream=stream0)
return (buf1, primals_1, primals_2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf1
def autopad(k, p=None):
if p is None:
p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
return p
class ConvSigNew(nn.Module):
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
super(ConvSigNew, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False
)
self.act = nn.Sigmoid() if act else nn.Identity()
def fuseforward(self, x):
return self.act(self.conv(x))
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
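# Editor's sketch (assumes a CUDA device; not part of the captured repo): the
# wrapper fuses conv + sigmoid, so every output value should lie in (0, 1).
if __name__ == '__main__':
    m = ConvSigNew(c1=4, c2=4).cuda()
    y = m(torch.rand([4, 4, 4, 4], device='cuda'))
    print(y.shape, float(y.min()) > 0.0)  # expected: torch.Size([4, 4, 4, 4]) True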
| Beaver48/kaggle-chest-xray-abnormalities | ConvSig | false | 11,314 | [
"MIT"
] | 0 | d41f32d1c59cb5c925795df3291e929b3ea6d5fd | https://github.com/Beaver48/kaggle-chest-xray-abnormalities/tree/d41f32d1c59cb5c925795df3291e929b3ea6d5fd |
QueryEncoding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zo/czoio6nsflvecb2qwhe3w75tplfaiyoo6yyqhaoz72bqpuhuhntr.py
# Topologically Sorted Source Nodes: [idx, setitem], Original ATen: [aten._to_copy, aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# idx => full_default
# setitem => copy, full_default_1
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select, %full_default_1), kwargs = {})
# %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default, %copy, 1, 0), kwargs = {})
triton_poi_fused__to_copy_fill_lift_fresh_0 = async_compile.triton('triton_poi_fused__to_copy_fill_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_fill_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mi/cmizcvmhsff6tf5i4xjvyisvdrmrwptxodpk2wr34xc6hh42qeky.py
# Topologically Sorted Source Nodes: [embedding, x], Original ATen: [aten.embedding, aten.add]
# Source node to ATen node mapping:
# embedding => embedding
# x => add
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %select_scatter_default), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %embedding), kwargs = {})
triton_poi_fused_add_embedding_1 = async_compile.triton('triton_poi_fused_add_embedding_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_embedding_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = (xindex // 16) % 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = x2
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = tmp1 == tmp2
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tl.full([1], 1, tl.int64)
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tl.load(in_ptr1 + (x0 + (4*tmp6)), xmask)
tmp8 = tmp0 + tmp7
tl.store(out_ptr0 + (x4), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [idx, setitem], Original ATen: [aten._to_copy, aten.lift_fresh, aten.fill]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_fill_lift_fresh_0.run(buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding, x], Original ATen: [aten.embedding, aten.add]
triton_poi_fused_add_embedding_1.run(primals_1, primals_2, buf1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 1, tl.int64)
    tmp5 = tl.where(tmp2, tmp3, tmp4)  # token index: 0 at the first position along dim 1, 1 elsewhere
tl.store(out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = xindex // 16 % 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = x2
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = tmp1 == tmp2
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tl.full([1], 1, tl.int64)
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tl.load(in_ptr1 + (x0 + 4 * tmp6), xmask)
tmp8 = tmp0 + tmp7
tl.store(out_ptr0 + x4, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_fill_lift_fresh_0[grid(64)](buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_embedding_1[grid(256)](primals_1, primals_2,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf1, buf0
class QueryEncodingNew(nn.Module):
def __init__(self, d_model):
super(QueryEncodingNew, self).__init__()
self.pe = nn.Embedding(2, d_model)
def forward(self, input_0):
primals_2 = self.pe.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
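# Editor's sketch (assumes a CUDA device): with a zero input, row 0 of the
# embedding table appears at position 0 along dim 1 and row 1 everywhere else.
if __name__ == '__main__':
    m = QueryEncodingNew(d_model=4).cuda()
    y = m(torch.zeros([4, 4, 4, 4], device='cuda'))
    print(torch.allclose(y[:, 0], m.pe.weight[0].expand(4, 4, 4)))  # expected: True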
| wukevin/RoseTTAFold | QueryEncoding | false | 4,551 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 |
_UpsampleLinear | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class _UpsampleLinear(nn.Module):
def __init__(self, scale):
super(_UpsampleLinear, self).__init__()
self._mode = 'linear', 'bilinear', 'trilinear'
self.scale = scale
def forward(self, x, scale=None):
        scale = self.scale if scale is None else scale
        mode = self._mode[x.dim() - 3]  # 3-D -> linear, 4-D -> bilinear, 5-D -> trilinear
return F.interpolate(x, scale_factor=scale, mode=mode,
align_corners=False)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5  # half-pixel convention: src = (dst + 0.5) * rscale - 0.5 (align_corners=False)
    tmp3 = tmp1 + tmp2
    tmp4 = 1.0  # reciprocal scale factor, here 1.0 since scale=1.0
    tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp7)
tmp29 = triton_helpers.minimum(tmp28, tmp4)
tmp30 = tmp25 * tmp29
tmp31 = tmp24 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp9.to(tl.float32)
tmp39 = tmp8 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp7)
tmp41 = triton_helpers.minimum(tmp40, tmp4)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(256)](buf2, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf2,
class _UpsampleLinearNew(nn.Module):
def __init__(self, scale):
super(_UpsampleLinearNew, self).__init__()
self._mode = 'linear', 'bilinear', 'trilinear'
self.scale = scale
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
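# Editor's sketch (assumes a CUDA device): with scale=1.0 the half-pixel source
# indices reduce to the destination indices, so the resample is an exact identity.
if __name__ == '__main__':
    up = _UpsampleLinearNew(scale=1.0)
    x = torch.rand([4, 4, 4, 4], device='cuda')
    print(torch.allclose(up(x), x))  # expected: True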
| cestcedric/TSSR-GAN | _UpsampleLinear | false | 1,657 | [
"BSD-2-Clause",
"MIT"
] | 0 | d6e1b50409e0f0591660552993e6d5b70d41e766 | https://github.com/cestcedric/TSSR-GAN/tree/d6e1b50409e0f0591660552993e6d5b70d41e766 |
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/s7/cs7jv7spsytbq3ouvdhla2tcr7wzgoznysid6m7rapuqn7g7cc3h.py
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# intersection => mul
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vq/cvqiixp4wmb73ig2cla6idbqq7i6vd5n3qmdluadrv32f52pdgw3.py
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, loss, sum_4, truediv_1, loss_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# loss => div
# loss_1 => sub
# mul_1 => mul_1
# sum_4 => sum_4
# truediv_1 => div_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, loss, sum_4, truediv_1, loss_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
triton_per_fused_add_div_mul_rsub_sum_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
    tmp1 = 1.0  # Dice smoothing term, added to numerator and denominator
    tmp2 = tmp0 + tmp1
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
    tmp13 = 0.25  # 1 / batch_size (N=4): mean of the per-sample Dice scores
    tmp14 = tmp12 * tmp13
    tmp15 = tmp1 - tmp14  # loss = 1 - mean Dice
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
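# Editor's sketch (assumes a CUDA device; not from the captured repo): perfect
# overlap gives Dice = 2*(64+1)/(64+64+1) = 130/129 per sample, so the loss is
# 1 - 130/129, slightly negative because of the +1 smoothing.
if __name__ == '__main__':
    loss_fn = DiceLossNew()
    p = torch.ones([4, 4, 4, 4], device='cuda')
    print(loss_fn(p, p).item())  # expected: about -0.00775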
| lee-zq/VesselSeg-pytorch | DiceLoss | false | 15,886 | [
"Apache-2.0"
] | 83 | b4f6571fc1fb1fbdaad60ff9282a54a1f1c455fa | https://github.com/lee-zq/VesselSeg-pytorch/tree/b4f6571fc1fb1fbdaad60ff9282a54a1f1c455fa |
SimpleBody | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 2048, grid=grid(2048), stream=stream0)
del primals_2
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # zero-activation mask, saved for the fused ReLU backward
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
primals_2, buf2, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class SimpleBodyNew(nn.Module):
def __init__(self, num_channels):
super(SimpleBodyNew, self).__init__()
self.out_feats = 32
self.fc1 = nn.Linear(num_channels, self.out_feats)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
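# Editor's sketch (assumes a CUDA device): the wrapper applies fc1 + ReLU over the
# last axis, widening 4 input channels to out_feats=32 non-negative features.
if __name__ == '__main__':
    body = SimpleBodyNew(num_channels=4).cuda()
    y = body(torch.rand([4, 4, 4, 4], device='cuda'))
    print(y.shape, bool((y >= 0).all()))  # expected: torch.Size([4, 4, 4, 32]) True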
| Michaelrising/sac-discrete.pytorch | SimpleBody | false | 9,314 | [
"MIT"
] | 0 | 93ae779f5980726db0302c3471fd143c7d1d35ed | https://github.com/Michaelrising/sac-discrete.pytorch/tree/93ae779f5980726db0302c3471fd143c7d1d35ed |
InitialSpanEncoder | import torch
from torch import Tensor
from torch.nn.modules.transformer import TransformerEncoderLayer
class InitialSpanEncoder(TransformerEncoderLayer):
"""
The initial layer for the Segmental Transformer Encoder. Representations of
    the source sequence attend over all unmasked positions in the sequence.
    The encoding at position ``i`` represents the masked span starting at
    position ``i+1``.
Args:
src: The input sequence to encode
attn_mask: The additive attention mask with which to mask out the
span encoded at each position. Default: ``None``
padding_mask: The mask for the padded positions of each key.
Default: ``None``
"""
def forward(self, src: 'Tensor', attn_mask: 'Tensor'=None, padding_mask:
'Tensor'=None) ->Tensor:
src1 = self.self_attn(src, src, src, attn_mask=attn_mask,
key_padding_mask=padding_mask)[0]
src = self.norm1(self.dropout1(src1))
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = self.norm2(src + self.dropout2(src2))
return src
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn.modules.transformer import TransformerEncoderLayer
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (2048, 4), (4, 1))
assert_size_stride(primals_9, (2048,), (1,))
assert_size_stride(primals_10, (4, 2048), (2048, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_native_layer_norm_4[grid(4)](buf9, buf10, buf11, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_5[grid(16)](buf9, buf10, buf11,
primals_6, primals_7, buf12, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_7
buf13 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf12, reinterpret_tensor(primals_8, (4, 2048), (
1, 4), 0), out=buf13)
buf14 = buf13
del buf13
triton_poi_fused_relu_6[grid(8192)](buf14, primals_9, 8192, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf14, reinterpret_tensor(primals_10, (2048, 4),
(1, 2048), 0), out=buf15)
buf16 = buf15
del buf15
triton_poi_fused_add_7[grid(16)](buf16, buf12, primals_11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_11
buf17 = buf11
del buf11
buf18 = buf10
del buf10
triton_poi_fused_native_layer_norm_4[grid(4)](buf16, buf17, buf18,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_5[grid(16)](buf16, buf17, buf18,
primals_12, primals_13, buf19, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf17
del buf18
del primals_13
return (buf19, primals_6, primals_12, primals_1, buf6,
reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf12, buf14,
buf16, primals_10, primals_8, primals_4, reinterpret_tensor(buf2, (
4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1,
4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0))
class InitialSpanEncoderNew(TransformerEncoderLayer):
"""
The initial layer for the Segmental Transformer Encoder. Representations of
    the source sequence attend over all unmasked positions in the sequence.
    The encoding at position ``i`` represents the masked span starting at
    position ``i+1``.
Args:
src: The input sequence to encode
attn_mask: The additive attention mask with which to mask out the
span encoded at each position. Default: ``None``
padding_mask: The mask for the padded positions of each key.
Default: ``None``
"""
def forward(self, input_0):
primals_2 = self.self_attn.in_proj_weight
primals_3 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_8 = self.linear1.weight
primals_9 = self.linear1.bias
primals_10 = self.linear2.weight
primals_6 = self.linear2.bias
primals_7 = self.norm1.weight
primals_11 = self.norm1.bias
primals_12 = self.norm2.weight
primals_13 = self.norm2.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| cmdowney88/XLSLM | InitialSpanEncoder | false | 3,301 | [
"MIT"
] | 0 | 7fe266bd0f0ad8a79a30052a18104b974d1c32e8 | https://github.com/cmdowney88/XLSLM/tree/7fe266bd0f0ad8a79a30052a18104b974d1c32e8 |
output | import math
import torch
import torch.nn as nn
class output(nn.Module):
def __init__(self, scope=512):
super(output, self).__init__()
self.conv1 = nn.Conv2d(32, 1, 1)
self.sigmoid1 = nn.Sigmoid()
self.conv2 = nn.Conv2d(32, 4, 1)
self.sigmoid2 = nn.Sigmoid()
self.conv3 = nn.Conv2d(32, 1, 1)
self.sigmoid3 = nn.Sigmoid()
self.scope = 512
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
score = self.sigmoid1(self.conv1(x))
loc = self.sigmoid2(self.conv2(x)) * self.scope
angle = (self.sigmoid3(self.conv3(x)) - 0.5) * math.pi
geo = torch.cat((loc, angle), 1)
return score, geo
def get_inputs():
return [torch.rand([4, 32, 64, 64])]
def get_init_inputs():
return [[], {}]
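# Sketch (not part of the original file): how this EAST detection head is
# typically exercised, assuming the `output` module defined above.
#
#   model = output()
#   score, geo = model(torch.rand(4, 32, 64, 64))
#   # score: (4, 1, 64, 64) text/no-text probability map
#   # geo:   (4, 5, 64, 64) -- four box distances scaled by `scope` (512)
#   #        followed by one rotation angle in (-pi/2, pi/2)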
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 5
x0 = xindex % 4096
x2 = xindex // 20480
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 16384 * x2), tmp4, other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = 512.0
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp14 = tl.load(in_ptr1 + (x0 + 4096 * x2), tmp11, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.sigmoid(tmp14)
tmp16 = 0.5
tmp17 = tmp15 - tmp16
tmp18 = 3.141592653589793
tmp19 = tmp17 * tmp18
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp11, tmp19, tmp20)
tmp22 = tl.where(tmp4, tmp10, tmp21)
tl.store(out_ptr0 + x3, tmp22, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (1, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
assert_size_stride(primals_4, (4, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_sigmoid_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(65536)](buf3, primals_5, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(primals_3, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16384)](buf5, primals_7, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 5, 64, 64), (20480, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_3[grid(81920)](buf3, buf5, buf6, 81920, XBLOCK
=512, num_warps=8, num_stages=1)
return (buf1, buf6, primals_1, primals_3, primals_4, primals_6, buf1,
buf3, buf5)
class outputNew(nn.Module):
def __init__(self, scope=512):
super(outputNew, self).__init__()
self.conv1 = nn.Conv2d(32, 1, 1)
self.sigmoid1 = nn.Sigmoid()
self.conv2 = nn.Conv2d(32, 4, 1)
self.sigmoid2 = nn.Sigmoid()
self.conv3 = nn.Conv2d(32, 1, 1)
self.sigmoid3 = nn.Sigmoid()
self.scope = 512
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
outputNew = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return outputNew[0], outputNew[1]
| binzh93/EAST | output | false | 3,229 | [
"MIT"
] | 0 | b5f66ab1a5dd37b6a5134336d494000e1add6da1 | https://github.com/binzh93/EAST/tree/b5f66ab1a5dd37b6a5134336d494000e1add6da1 |
ChannelNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/i7/ci7t7iiz7rvr7feeg7u3oqbndzrc2eexgichqwatlcys5unofv7u.py
# Topologically Sorted Source Nodes: [mean, pow_1, mean_x2, pow_2, var, sub_1, add, sqrt, x_norm, mul, x_norm_1], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mean_x2 => mean_1
# mul => mul
# pow_1 => pow_1
# pow_2 => pow_2
# sqrt => sqrt
# sub_1 => sub_1
# var => sub
# x_norm => div
# x_norm_1 => add_1
# Graph fragment:
# %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1], True), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean_1, %pow_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_2), kwargs = {})
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp18 = tl.load(in_ptr1 + (0))
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp23 = tl.load(in_ptr2 + (0))
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = tmp0 * tmp0
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp4 / tmp10
tmp12 = tmp9 / tmp10
tmp13 = tmp11 * tmp11
tmp14 = tmp12 - tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp20 = tmp0 - tmp11
tmp21 = tmp20 / tmp17
tmp22 = tmp19 * tmp21
tmp25 = tmp22 + tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp11, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp17, xmask)
tl.store(out_ptr0 + (r1 + (64*x0)), tmp25, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 1, 64), (64, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, pow_1, mean_x2, pow_2, var, sub_1, add, sqrt, x_norm, mul, x_norm_1], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0.run(buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, grid=grid(4), stream=stream0)
del primals_2
del primals_3
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp18 = tl.load(in_ptr1 + 0)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp23 = tl.load(in_ptr2 + 0)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = tmp0 * tmp0
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp4 / tmp10
tmp12 = tmp9 / tmp10
tmp13 = tmp11 * tmp11
tmp14 = tmp12 - tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp20 = tmp0 - tmp11
tmp21 = tmp20 / tmp17
tmp22 = tmp19 * tmp21
tmp25 = tmp22 + tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp17, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp25, xmask)
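# The kernel above fuses the whole normalization for one 64-element group per
# program: a single pass accumulates sum(x) and sum(x**2), derives the variance
# via Var[x] = E[x**2] - E[x]**2, normalizes, and applies the group-wise
# scale/shift, while the mean (in_out_ptr0) and std (in_out_ptr1) are written
# back for reuse in the backward pass.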
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 1, 64), (64, 64, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0[grid(4)](buf1,
buf3, primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, buf1, buf3
class ChannelNormNew(Module):
"""
## Channel Normalization
    This is similar to [Group Normalization](../group_norm/index.html), but the affine transform is done group-wise.
"""
def __init__(self, channels, groups, eps: 'float'=1e-05, affine: 'bool'
=True):
"""
* `groups` is the number of groups the features are divided into
* `channels` is the number of features in the input
* `eps` is $\\epsilon$, used in $\\sqrt{Var[x^{(k)}] + \\epsilon}$ for numerical stability
* `affine` is whether to scale and shift the normalized value
"""
super().__init__()
self.channels = channels
self.groups = groups
self.eps = eps
self.affine = affine
if self.affine:
self.scale = nn.Parameter(torch.ones(groups))
self.shift = nn.Parameter(torch.zeros(groups))
def forward(self, input_0):
primals_2 = self.scale
primals_3 = self.shift
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
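# Hedged eager-mode reference (an illustration under assumed view semantics,
# not taken from the source repo): the fused kernel should agree with this
# direct computation for the shapes asserted in `call`.
#
#   def channel_norm_ref(x, scale, shift, groups, eps=1e-05):
#       xg = x.view(x.shape[0], groups, -1)
#       mean = xg.mean(dim=-1, keepdim=True)
#       var = (xg ** 2).mean(dim=-1, keepdim=True) - mean ** 2
#       out = scale.view(1, -1, 1) * (xg - mean) / (var + eps).sqrt()
#       return (out + shift.view(1, -1, 1)).view_as(x)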
| techthiyanes/annotated_deep_learning_paper_implementations | ChannelNorm | false | 16,554 | [
"MIT"
] | 3,714 | 8af24da2dd39a9a87482a4d18c2dc829bbd3fd47 | https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47 |
Planar | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/j6/cj6hneuwtxuqku4ka6k6f2vxucocdiv6p3b2c4kfrcb5qwbmb6sg.py
# Topologically Sorted Source Nodes: [pow_1, w_norm_sq], Original ATen: [aten.pow, aten.sum]
# Source node to ATen node mapping:
# pow_1 => pow_1
# w_norm_sq => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2], True), kwargs = {})
triton_poi_fused_pow_sum_0 = async_compile.triton('triton_poi_fused_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vr/cvryswlc7olf6iscoyyxog3zotceoyz6z6tn3m7r27lzurpsbsjs.py
# Topologically Sorted Source Nodes: [wzb, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# wzb => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm_1, %select_2), kwargs = {})
# %tanh : [num_users=3] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp6 = libdevice.tanh(tmp5)
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5z/c5zxdmoivium33w2qam24kojs7lyxgbfb6lulnqtriq4xzdmkzgp.py
# Topologically Sorted Source Nodes: [softplus, m_uw, sub, mul, truediv, u_hat, mul_1, z, pow_2, sub_1, psi], Original ATen: [aten.softplus, aten.add, aten.sub, aten.mul, aten.div, aten.pow, aten.rsub]
# Source node to ATen node mapping:
# m_uw => add
# mul => mul_1
# mul_1 => mul_2
# pow_2 => pow_2
# psi => mul_3
# softplus => div, exp, gt, log1p, mul, where
# sub => sub
# sub_1 => sub_1
# truediv => div_1
# u_hat => add_1
# z => add_3
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %bmm, %div), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, -1.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %bmm), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %permute_3), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %sum_1), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, %div_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %tanh), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, %mul_2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%tanh, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %pow_2), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %sub_1), kwargs = {})
triton_poi_fused_add_div_mul_pow_rsub_softplus_sub_2 = async_compile.triton('triton_poi_fused_add_div_mul_pow_rsub_softplus_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_rsub_softplus_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_rsub_softplus_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (x2), xmask)
tmp15 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + (x2), xmask)
tmp19 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 20.0
tmp5 = tmp3 > tmp4
tmp6 = tl_math.exp(tmp3)
tmp7 = libdevice.log1p(tmp6)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp5, tmp1, tmp8)
tmp10 = -1.0
tmp11 = tmp9 + tmp10
tmp12 = tmp11 - tmp1
tmp14 = tmp12 * tmp13
tmp16 = tmp14 / tmp15
tmp17 = tmp0 + tmp16
tmp20 = tmp17 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp19 * tmp19
tmp23 = tmp2 - tmp22
tmp24 = tmp13 * tmp23
tl.store(out_ptr0 + (x2), tmp17, xmask)
tl.store(out_ptr1 + (x2), tmp21, xmask)
tl.store(out_ptr2 + (x2), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/as/casa25kbsjcsboxppwpjigmfgz4dqo5yacivs4ovqvekprlytwt6.py
# Topologically Sorted Source Nodes: [logdet_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# logdet_2 => add_5
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_2, 0.0), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl_math.log(tmp3)
tmp5 = 0.0
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_3, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [uw], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, w_norm_sq], Original ATen: [aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_sum_0.run(buf1, buf4, 4, grid=grid(4), stream=stream0)
buf6 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(primals_8, (4, 4, 1), (4, 1, 1), 0), out=buf6)
buf7 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [wzb, tanh], Original ATen: [aten.add, aten.tanh]
triton_poi_fused_add_tanh_1.run(buf7, buf6, primals_7, 4, grid=grid(4), stream=stream0)
del primals_7
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus, m_uw, sub, mul, truediv, u_hat, mul_1, z, pow_2, sub_1, psi], Original ATen: [aten.softplus, aten.add, aten.sub, aten.mul, aten.div, aten.pow, aten.rsub]
triton_poi_fused_add_div_mul_pow_rsub_softplus_sub_2.run(buf0, buf3, buf1, buf4, primals_8, buf7, buf5, buf8, buf9, 16, grid=grid(16), stream=stream0)
buf10 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [bmm_2], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, buf5, out=buf10)
buf11 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [logdet_2], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf10, buf11, 4, grid=grid(4), stream=stream0)
return (reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf11, primals_3, reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), buf3, buf4, buf5, buf7, buf10, reinterpret_tensor(buf9, (4, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_8, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (4, 1, 4), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp6 = libdevice.tanh(tmp5)
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_rsub_softplus_sub_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + x2, xmask)
tmp15 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr4 + x2, xmask)
tmp19 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 20.0
tmp5 = tmp3 > tmp4
tmp6 = tl_math.exp(tmp3)
tmp7 = libdevice.log1p(tmp6)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp5, tmp1, tmp8)
tmp10 = -1.0
tmp11 = tmp9 + tmp10
tmp12 = tmp11 - tmp1
tmp14 = tmp12 * tmp13
tmp16 = tmp14 / tmp15
tmp17 = tmp0 + tmp16
tmp20 = tmp17 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp19 * tmp19
tmp23 = tmp2 - tmp22
tmp24 = tmp13 * tmp23
tl.store(out_ptr0 + x2, tmp17, xmask)
tl.store(out_ptr1 + x2, tmp21, xmask)
tl.store(out_ptr2 + x2, tmp24, xmask)
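# The tmp5/tmp9 branch above is the standard numerically stable softplus:
# log1p(exp(x)) for x <= 20 and the identity for x > 20, matching the defaults
# of nn.Softplus(beta=1, threshold=20). It feeds m_uw = -1 + softplus(w^T u),
# which keeps w^T u_hat > -1 and hence the planar step invertible.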
@triton.jit
def triton_poi_fused_add_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl_math.log(tmp3)
tmp5 = 0.0
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
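# This final kernel computes log|1 + psi^T u_hat| per batch element, i.e. the
# log-determinant of the planar-flow Jacobian; the trailing "+ 0.0" is the
# initial value of the logdet accumulator folded in at compile time.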
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_3, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_6, (4, 1),
(1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sum_0[grid(4)](buf1, buf4, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0
), reinterpret_tensor(primals_8, (4, 4, 1), (4, 1, 1), 0), out=buf6
)
buf7 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0)
del buf2
triton_poi_fused_add_tanh_1[grid(4)](buf7, buf6, primals_7, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_rsub_softplus_sub_2[grid(16)](buf0,
buf3, buf1, buf4, primals_8, buf7, buf5, buf8, buf9, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf10 = buf6
del buf6
extern_kernels.bmm(buf9, buf5, out=buf10)
buf11 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_add_3[grid(4)](buf10, buf11, 4, XBLOCK=4,
num_warps=1, num_stages=1)
return reinterpret_tensor(buf8, (4, 4), (4, 1), 0
), buf11, primals_3, reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0
), buf3, buf4, buf5, buf7, buf10, reinterpret_tensor(buf9, (4, 4, 1
), (4, 1, 4), 0), reinterpret_tensor(primals_8, (4, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf0, (4, 1, 4), (4, 1, 1), 0)
class PlanarStep(nn.Module):
def __init__(self):
super(PlanarStep, self).__init__()
self.h = nn.Tanh()
self.softplus = nn.Softplus()
def _der_h(self, x):
"""Derivative of activation function h."""
return self._der_tanh(x)
def _der_tanh(self, x):
"""Derivative of the Tanh function."""
return 1 - self.h(x) ** 2
def forward(self, zk, u, w, b):
"""
        Forward pass. Assumes amortized u, w and b. The conditions on the
        diagonals of u and w required for invertibility are enforced inside
        this function. Computes the following transformation:
        z' = z + u h(w^T z + b)
        or, written row-wise:
        z'^T = z^T + h(z^T w + b) u^T
Assumes the following input shapes:
shape u = (batch_size, z_dim, 1)
shape w = (batch_size, 1, z_dim)
shape b = (batch_size, 1, 1)
shape z = (batch_size, z_dim).
"""
zk = zk.unsqueeze(2)
uw = torch.bmm(w, u)
m_uw = -1.0 + self.softplus(uw)
w_norm_sq = torch.sum(w ** 2, dim=2, keepdim=True)
u_hat = u + (m_uw - uw) * w.transpose(2, 1) / w_norm_sq
wzb = torch.bmm(w, zk) + b
z = zk + u_hat * self.h(wzb)
z = z.squeeze(2)
psi = w * self._der_h(wzb)
logdet = torch.log(torch.abs(1 + torch.bmm(psi, u_hat)))
logdet = logdet.squeeze(2).squeeze(1)
return z, logdet
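# Sketch (assumed shapes for illustration, not from the source repo):
# exercising one planar step in eager mode.
#
#   step = PlanarStep()
#   z = torch.randn(8, 4)             # (batch_size, z_dim)
#   u = torch.randn(8, 4, 1)          # (batch_size, z_dim, 1)
#   w = torch.randn(8, 1, 4)          # (batch_size, 1, z_dim)
#   b = torch.randn(8, 1, 1)          # (batch_size, 1, 1)
#   z_new, logdet = step(z, u, w, b)  # shapes (8, 4) and (8,)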
class Error(Exception):
"""Base error class, from which all other errors derive."""
pass
class InvalidArgumentError(Error):
"""This error will be shown when a given argument has an invalid value."""
pass
class NormalizingFlow(nn.Module):
"""Base class for normalizing flows."""
def __init__(self, h_dim, z_dim, flow_depth, hidden_depth):
super(NormalizingFlow, self).__init__()
self.h_dim = h_dim
self.z_dim = z_dim
self.flow_depth = flow_depth
self.hidden_depth = hidden_depth
@property
def flow_depth(self):
return self._flow_depth
@flow_depth.setter
def flow_depth(self, value):
if not isinstance(value, int):
raise InvalidArgumentError('flow_depth should be an integer.')
elif value < 1:
raise InvalidArgumentError(
'flow_depth should be strictly positive.')
else:
self._flow_depth = value
@property
def hidden_depth(self):
return self._hidden_depth
@hidden_depth.setter
def hidden_depth(self, value):
if not isinstance(value, int):
raise InvalidArgumentError('hidden_depth should be an integer.')
elif value < 0:
raise InvalidArgumentError('hidden_depth should be positive.')
else:
self._hidden_depth = value
class PlanarNew(NormalizingFlow):
"""Planar Normalizing flow with single unit bottleneck."""
def __init__(self, h_dim, z_dim, flow_depth):
super(PlanarNew, self).__init__(h_dim, z_dim, flow_depth, 0)
self.flow = PlanarStep()
self.h_to_u = nn.Linear(self.h_dim, self.flow_depth * self.z_dim)
self.h_to_w = nn.Linear(self.h_dim, self.flow_depth * self.z_dim)
self.h_to_b = nn.Linear(self.h_dim, self.flow_depth)
def forward(self, input_0, input_1):
primals_1 = self.h_to_u.weight
primals_2 = self.h_to_u.bias
primals_3 = self.h_to_w.weight
primals_5 = self.h_to_w.bias
primals_6 = self.h_to_b.weight
primals_7 = self.h_to_b.bias
primals_4 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
| scfrank/deep-generative-lm | Planar | false | 4,295 | [
"MIT"
] | 0 | 70067fcda82aa035bba805ce6c2709097166a7a4 | https://github.com/scfrank/deep-generative-lm/tree/70067fcda82aa035bba805ce6c2709097166a7a4 |
UNetModule | import torch
import torch.nn as nn
import torch.backends.cudnn
import torch.utils.data
def conv3x3(in_, out):
return nn.Conv2d(in_, out, 3, padding=1)
class Conv3BN(nn.Module):
def __init__(self, in_: 'int', out: 'int', bn=False):
super().__init__()
self.conv = conv3x3(in_, out)
self.bn = nn.BatchNorm2d(out) if bn else None
self.activation = nn.SELU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
x = self.activation(x)
return x
class UNetModule(nn.Module):
def __init__(self, in_: 'int', out: 'int'):
super().__init__()
self.l1 = Conv3BN(in_, out)
self.l2 = Conv3BN(out, out)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_': 4, 'out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.backends.cudnn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0507009873554805
tmp6 = tmp2 * tmp5
tmp7 = 1.0
tmp8 = tmp2 * tmp7
tmp9 = libdevice.expm1(tmp8)
tmp10 = 1.7580993408473766
tmp11 = tmp9 * tmp10
tmp12 = tl.where(tmp4, tmp6, tmp11)
tl.store(in_out_ptr0 + x3, tmp12, xmask)
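# Despite the "elu" in its generated name, this kernel fuses the convolution
# bias add with SELU: scale * x for x > 0 and scale * alpha * expm1(x)
# otherwise, where scale ~= 1.0507009873554805 and
# scale * alpha ~= 1.7580993408473766 (alpha ~= 1.6732632423543772).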
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_elu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_elu_0[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf3
def conv3x3(in_, out):
return nn.Conv2d(in_, out, 3, padding=1)
class Conv3BN(nn.Module):
def __init__(self, in_: 'int', out: 'int', bn=False):
super().__init__()
self.conv = conv3x3(in_, out)
self.bn = nn.BatchNorm2d(out) if bn else None
self.activation = nn.SELU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
x = self.activation(x)
return x
class UNetModuleNew(nn.Module):
def __init__(self, in_: 'int', out: 'int'):
super().__init__()
self.l1 = Conv3BN(in_, out)
self.l2 = Conv3BN(out, out)
def forward(self, input_0):
primals_1 = self.l1.conv.weight
primals_2 = self.l1.conv.bias
primals_4 = self.l2.conv.weight
primals_5 = self.l2.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| jayden-chua/image-mask | UNetModule | false | 3,705 | [
"MIT"
] | 0 | ce2c6a32bf13df582e7b57e506d58518258be292 | https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292 |
ByteCombine | import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class ReRegualizedLinearNACLayer(torch.nn.Module):
def __init__(self, in_features, out_features, **kwargs):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(2.0 / (self.in_features + self.out_features))
r = min(0.5, math.sqrt(3.0) * std)
torch.nn.init.uniform_(self.W, -r, r)
def forward(self, input, reuse=False):
W = torch.clamp(self.W, -1, 1)
return torch.nn.functional.linear(input, W, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
class ByteCombine(nn.Module):
def __init__(self, input_dim, output_dim, inner_dim=1024, **kwags):
super().__init__()
self.layer_1 = ReRegualizedLinearNACLayer(input_dim, inner_dim)
self.layer_2 = ReRegualizedLinearNACLayer(inner_dim, output_dim)
self.act = nn.GELU()
self.reset_parameters()
def reset_parameters(self):
self.layer_1.reset_parameters()
self.layer_2.reset_parameters()
def forward(self, input):
return self.act(self.layer_2(self.act(self.layer_1(input))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = -1.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + x0, tmp4, None)
tl.store(out_ptr1 + x0, tmp7, None)
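# This kernel clamps the NAC weight matrix into [-1, 1] (out_ptr0) and records
# a boolean mask of the entries that were already in range (out_ptr1); the
# masks (buf6, buf7) are returned from `call` so the backward pass can zero
# the gradients of clamped weights, matching torch.clamp's gradient.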
@triton.jit
def triton_poi_fused_gelu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, None)
@triton.jit
def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
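# Both GELU kernels implement the exact erf-based form used by nn.GELU():
# gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), with 0.7071... = 1/sqrt(2).
# The first covers the (64, 1024) hidden activations, the second the (64, 4)
# output of the second NAC layer.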
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1024, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 1024), (1024, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1024, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((1024, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_le_logical_and_0[grid(4096)](primals_1,
buf0, buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 1024), (1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf0, (1024, 4), (1, 1024), 0)
del buf0
buf6 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_0[grid(4096)](primals_3,
buf2, buf6, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1),
torch.float32)
triton_poi_fused_gelu_1[grid(65536)](buf1, buf3, 65536, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0
), buf2, out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_gelu_2[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf3, (64, 1024), (1024, 1), 0
), buf4, reinterpret_tensor(buf2, (4, 1024), (1024, 1), 0), buf6, buf7
class ReRegualizedLinearNACLayer(torch.nn.Module):
def __init__(self, in_features, out_features, **kwargs):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
std = math.sqrt(2.0 / (self.in_features + self.out_features))
r = min(0.5, math.sqrt(3.0) * std)
torch.nn.init.uniform_(self.W, -r, r)
def forward(self, input, reuse=False):
W = torch.clamp(self.W, -1, 1)
return torch.nn.functional.linear(input, W, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
class ByteCombineNew(nn.Module):
def __init__(self, input_dim, output_dim, inner_dim=1024, **kwags):
super().__init__()
self.layer_1 = ReRegualizedLinearNACLayer(input_dim, inner_dim)
self.layer_2 = ReRegualizedLinearNACLayer(inner_dim, output_dim)
self.act = nn.GELU()
self.reset_parameters()
def reset_parameters(self):
self.layer_1.reset_parameters()
self.layer_2.reset_parameters()
def forward(self, input_0):
primals_1 = self.layer_1.W
primals_3 = self.layer_2.W
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
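# Hedged usage sketch for the compiled module (CUDA required; the input shape
# mirrors the asserts in call()):
#   model = ByteCombineNew(input_dim=4, output_dim=4).cuda()
#   out = model(torch.rand(4, 4, 4, 4, device='cuda'))  # out.shape == (4, 4, 4, 4)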
| CUMLSec/stateformer | ByteCombine | false | 7,932 | [
"MIT"
] | 41 | 87cb3c906c43fcff42b2ca820eb6e7fd918d0a1c | https://github.com/CUMLSec/stateformer/tree/87cb3c906c43fcff42b2ca820eb6e7fd918d0a1c |
FFN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/37/c37iajzamvg4r3s5ikb4y6kka2x3towdlz4bqoh3dx4uywvya2mb.py
# Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# relu => relu
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/lf/clf7hs52i4bd5d3e73uio27ntyjfqmszkbsw6dta3r6rzgeftva3.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/tr/ctrdeeo45yfmpbksxog7is2d6fd26mv2poki6u26emzhamo2zqxd.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_4 => add
# x_5 => clone, var_mean
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/px/cpxbmtafvoqnd5j3oyskd4thxpat5nbj25jgagf6an6xgvaf47sv.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# x_4 => add
# x_5 => add_1, add_2, clone, mul, mul_1, rsqrt, sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_1, %primals_1), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y3), ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + (4*y3)), tmp13, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4), (64, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf4, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_3.run(buf4, primals_1, buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(buf4, primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4, grid=grid(16, 4), stream=stream0)
del buf5
del buf6
del primals_7
return (buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16, 1), (16, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask)
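# The last two kernels implement LayerNorm over the hidden dim in two passes:
# kernel 3 accumulates the per-row mean and (biased) variance of the residual
# sum conv_out + input, and kernel 4 then normalises with rsqrt(var + 1e-5)
# and applies the elementwise affine (weight, bias) while writing the
# permuted layout back.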
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4), (64, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(16)](buf4, primals_1,
buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf4,
primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf5
del buf6
del primals_7
return buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
        :param w_init: str. nonlinearity name used to pick the gain for Xavier initialization.
"""
super(Conv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.
calculate_gain(w_init))
def forward(self, x):
x = self.conv(x)
return x
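# Hedged sketch: Conv is a thin nn.Conv1d wrapper, so it consumes
# (batch, channels, length) tensors:
#   conv = Conv(4, 16, kernel_size=1, w_init='relu')
#   y = conv(torch.randn(2, 4, 10))  # y.shape == (2, 16, 10)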
class FFNNew(nn.Module):
"""
Positionwise Feed-Forward Network
"""
def __init__(self, num_hidden):
"""
:param num_hidden: dimension of hidden
"""
super(FFNNew, self).__init__()
self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1, w_init=
'relu')
self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
self.dropout = nn.Dropout(p=0.1)
self.layer_norm = nn.LayerNorm(num_hidden)
def forward(self, input_0):
primals_2 = self.w_1.conv.weight
primals_3 = self.w_1.conv.bias
primals_4 = self.w_2.conv.weight
primals_5 = self.w_2.conv.bias
primals_6 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
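# Hedged usage sketch (CUDA required; input is (batch, time, num_hidden), and
# the permutes around the 1x1 convolutions are fused into the kernels above):
#   ffn = FFNNew(num_hidden=4).cuda()
#   out = ffn(torch.rand(4, 4, 4, device='cuda'))  # out.shape == (4, 4, 4)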
| Munna-Manoj/Team7_TTS | FFN | false | 11,730 | [
"MIT"
] | 0 | 5e2d473a2afe429023876bcc51c2ac966a4938b8 | https://github.com/Munna-Manoj/Team7_TTS/tree/5e2d473a2afe429023876bcc51c2ac966a4938b8 |
CharbonnierLoss | import torch
import torch.nn as nn
class CharbonnierLoss(nn.Module):
def __init__(self):
super(CharbonnierLoss, self).__init__()
def forward(self, pre, gt):
N = pre.shape[0]
diff = torch.sum(torch.sqrt((pre - gt).pow(2) + 0.001 ** 2)) / N
return diff
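# Charbonnier penalty, a smooth differentiable surrogate for |x|:
#   L = (1/N) * sum_i sqrt((pre_i - gt_i)^2 + eps^2),  eps = 1e-3
# The fused kernel in the optimised code folds eps^2 into the literal 1e-06
# and 1/N into the 0.25 multiplier for the fixed batch size N = 4.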
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_pow_sqrt_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_pow_sqrt_sub_sum_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class CharbonnierLossNew(nn.Module):
def __init__(self):
super(CharbonnierLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| IndigoPurple/EFENet | CharbonnierLoss | false | 8,293 | [
"MIT"
] | 11 | e88234486f19534274a0a20badc251788ac67e31 | https://github.com/IndigoPurple/EFENet/tree/e88234486f19534274a0a20badc251788ac67e31 |
network | import torch
import torch.nn as nn
import torch.nn.functional as F
class network(nn.Module):
def __init__(self, state_size, action_size, seed=0):
super(network, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, 32)
self.fc2 = nn.Linear(32, 32)
self.fc3 = nn.Linear(32, action_size)
def forward(self, state):
"""Build a network that maps state -> action values."""
x = self.fc1(state)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
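# Hedged sketch: a plain two-hidden-layer MLP mapping states to Q-values:
#   net = network(state_size=4, action_size=2)
#   q = net(torch.rand(1, 4))  # q.shape == (1, 2)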
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32), (32, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (4, 32), (32, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
primals_2, buf6, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3,
primals_5, buf5, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32),
(32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(
buf3, (64, 32), (32, 1), 0), primals_6, buf5, primals_4, buf6
class networkNew(nn.Module):
def __init__(self, state_size, action_size, seed=0):
super(networkNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, 32)
self.fc2 = nn.Linear(32, 32)
self.fc3 = nn.Linear(32, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| akashkmr27089/ReinforcementLearning_Udacity_Deep_Reinforcemnt_Learning | network | false | 3,072 | [
"MIT"
] | 0 | b7dc13b0116898848d8d0b8a95b7af182982bd6b | https://github.com/akashkmr27089/ReinforcementLearning_Udacity_Deep_Reinforcemnt_Learning/tree/b7dc13b0116898848d8d0b8a95b7af182982bd6b |
Envelope | import torch
import torch.utils.data
class Envelope(torch.nn.Module):
def __init__(self, exponent):
super(Envelope, self).__init__()
self.p = exponent + 1
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, x):
p, a, b, c = self.p, self.a, self.b, self.c
x_pow_p0 = x.pow(p - 1)
x_pow_p1 = x_pow_p0 * x
x_pow_p2 = x_pow_p1 * x
return 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'exponent': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 * tmp0
tmp6 = tmp5 * tmp5
tmp7 = -21.0
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tmp10 = tmp6 * tmp0
tmp11 = 35.0
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tmp10 * tmp0
tmp15 = -15.0
tmp16 = tmp14 * tmp15
tmp17 = tmp13 + tmp16
tl.store(out_ptr0 + x0, tmp17, xmask)
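# Constant-folding check: with exponent=4 the module sets p = 5, so
#   a = -(p + 1) * (p + 2) / 2 = -21,  b = p * (p + 2) = 35,
#   c = -p * (p + 1) / 2 = -15,
# matching the -21.0, 35.0 and -15.0 literals above; x.pow(p - 1) becomes the
# (x * x) * (x * x) = x**4 chain in tmp5/tmp6.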
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_reciprocal_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class EnvelopeNew(torch.nn.Module):
def __init__(self, exponent):
super(EnvelopeNew, self).__init__()
self.p = exponent + 1
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| beneisner/pytorch_geometric | Envelope | false | 6,321 | [
"MIT"
] | 1 | 53d44a96bd2de2753b1ab1d7153c026c92606a81 | https://github.com/beneisner/pytorch_geometric/tree/53d44a96bd2de2753b1ab1d7153c026c92606a81 |
BCEFocalLoss | import torch
import torch.nn as nn
class BCEFocalLoss(nn.Module):
"""Implementation of Focal Loss for Binary Classification Problems.
Focal loss was proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self, gamma=0, eps=1e-07, reduction='mean'):
"""Constructor Method for FocalLoss class.
Args:
gamma : The focal parameter. Defaults to 0.
eps : Constant for computational stability.
reduction: The reduction parameter for Cross Entropy Loss.
"""
super(BCEFocalLoss, self).__init__()
self.gamma = gamma
self.reduction = reduction
self.eps = eps
self.bce = torch.nn.BCEWithLogitsLoss(reduction='none')
def forward(self, logits: 'torch.Tensor', targets: 'torch.Tensor'
) ->torch.Tensor:
"""Forward method.
Args:
            logits: The raw logits from the network, of shape (N, *) where * = any number of extra dims
            targets: The ground-truth targets, reshaped via .view to the shape of logits
Returns:
The computed loss value
"""
targets = targets.view(logits.shape)
logp = self.bce(logits, targets)
p = torch.exp(-logp)
loss = (1 - p) ** self.gamma * logp
return loss.mean() if self.reduction == 'mean' else loss.sum(
) if self.reduction == 'sum' else loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = -tmp12
tmp14 = tl_math.exp(tmp13)
    tmp1 - tmp14  # (1 - p); dead value, since gamma == 0 folds the focal factor (1 - p) ** gamma to 1
tmp16 = tmp1 * tmp12
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
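# The kernel evaluates BCEWithLogitsLoss in its numerically stable form
#   logp = (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
#        = max(x, 0) - t * x + log1p(exp(-|x|)),
# then averages over all 256 elements. With gamma == 0 the focal factor
# (1 - p) ** gamma is identically 1, so p = exp(-logp) feeds no further math.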
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0[
grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCEFocalLossNew(nn.Module):
"""Implementation of Focal Loss for Binary Classification Problems.
Focal loss was proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self, gamma=0, eps=1e-07, reduction='mean'):
"""Constructor Method for FocalLoss class.
Args:
gamma : The focal parameter. Defaults to 0.
eps : Constant for computational stability.
reduction: The reduction parameter for Cross Entropy Loss.
"""
super(BCEFocalLossNew, self).__init__()
self.gamma = gamma
self.reduction = reduction
self.eps = eps
self.bce = torch.nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| earlbabson/torchflare | BCEFocalLoss | false | 6,630 | [
"Apache-2.0"
] | 1 | 15db06d313a53a3ec4640869335ba87730562b28 | https://github.com/earlbabson/torchflare/tree/15db06d313a53a3ec4640869335ba87730562b28 |
CrossEntropyLossOneHot | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [soft_preds], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# soft_preds => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pu/cpuucdcboo3bte2dptjuvdffdfvryqxeyj2uq2cx6nyw4chqfxah.py
# Topologically Sorted Source Nodes: [soft_preds, mul_res, neg, sum_res], Original ATen: [aten._log_softmax, aten.mul, aten.neg, aten.sum]
# Source node to ATen node mapping:
# mul_res => mul
# neg => neg
# soft_preds => exp, log, sub_1, sum_1
# sum_res => sum_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mul,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%neg, [-1]), kwargs = {})
triton_poi_fused__log_softmax_mul_neg_sum_1 = async_compile.triton('triton_poi_fused__log_softmax_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_mul_neg_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = -tmp14
tmp16 = tmp2 - tmp11
tmp18 = tmp16 * tmp17
tmp19 = -tmp18
tmp20 = tmp15 + tmp19
tmp21 = tmp5 - tmp11
tmp23 = tmp21 * tmp22
tmp24 = -tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp8 - tmp11
tmp28 = tmp26 * tmp27
tmp29 = -tmp28
tmp30 = tmp25 + tmp29
tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k6/ck6ga6akxydygyrl2mzqzrieevwro3j34zy57a4hhvt3idanwv7s.py
# Topologically Sorted Source Nodes: [sum_2, cross_entropy_loss], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# cross_entropy_loss => div
# sum_2 => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_2, [0]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
triton_poi_fused_div_sum_2 = async_compile.triton('triton_poi_fused_div_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [soft_preds], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [soft_preds, mul_res, neg, sum_res], Original ATen: [aten._log_softmax, aten.mul, aten.neg, aten.sum]
triton_poi_fused__log_softmax_mul_neg_sum_1.run(buf0, arg1_1, buf1, 64, grid=grid(64), stream=stream0)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_2, cross_entropy_loss], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_2.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
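# Standard log-softmax stabilisation: subtracting the row-wise max before
# exponentiating keeps exp() in range without changing the result, since
#   log_softmax(x) = (x - m) - log(sum(exp(x - m)))  for any constant m;
# the second kernel supplies the log-sum-exp term.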
@triton.jit
def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = -tmp14
tmp16 = tmp2 - tmp11
tmp18 = tmp16 * tmp17
tmp19 = -tmp18
tmp20 = tmp15 + tmp19
tmp21 = tmp5 - tmp11
tmp23 = tmp21 * tmp22
tmp24 = -tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp8 - tmp11
tmp28 = tmp26 * tmp27
tmp29 = -tmp28
tmp30 = tmp25 + tmp29
tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # sums the four leading-dim slices of the (4, 4, 4) buffer and scales by
    # 0.25, i.e. a mean over dim 0
    xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](buf0, arg1_1,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg1_1
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_sum_2[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf1
return buf2,
class CrossEntropyLossOneHotNew(nn.Module):
def __init__(self):
super(CrossEntropyLossOneHotNew, self).__init__()
self.soft_max = nn.LogSoftmax(dim=-1)
self.nll_loss = nn.NLLLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
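# Hedged usage sketch (my addition): a minimal smoke test for the compiled
# module, assuming a CUDA device is available. Input shapes follow the
# assert_size_stride checks in call() above.
if __name__ == "__main__":
    model = CrossEntropyLossOneHotNew()
    preds = torch.rand(4, 4, 4, 4, device="cuda")
    soft_targets = torch.rand(4, 4, 4, 4, device="cuda")
    loss = model(preds, soft_targets)
    print(loss.shape)  # expected: torch.Size([4, 4]) per the (4, 4) output buffer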
| ChrisZhangcx/reproduce_elliptic | CrossEntropyLossOneHot | false | 5,009 | [
"MIT"
] | 1 | b5297456376aa944c9b17bb2394407ec482e1bb2 | https://github.com/ChrisZhangcx/reproduce_elliptic/tree/b5297456376aa944c9b17bb2394407ec482e1bb2 |
MSELoss | import torch
import torch.nn as nn
def reduction_batch_based(image_loss, M):
divisor = torch.sum(M)
if divisor == 0:
return 0
else:
return torch.sum(image_loss) / divisor
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
M = torch.sum(mask, (1, 2))
res = prediction - target
image_loss = torch.sum(mask * res * res, (1, 2))
return reduction(image_loss, 2 * M)
def reduction_image_based(image_loss, M):
valid = M.nonzero()
image_loss[valid] = image_loss[valid] / M[valid]
return torch.mean(image_loss)
class MSELoss(nn.Module):
def __init__(self, reduction='batch-based'):
super().__init__()
if reduction == 'batch-based':
self.__reduction = reduction_batch_based
else:
self.__reduction = reduction_image_based
def forward(self, prediction, target, mask):
return mse_loss(prediction, target, mask, reduction=self.__reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
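# Hedged worked example (my addition): for a single element with mask m, the
# residual term above contributes m * (p - t)^2; summing over the two spatial
# dims gives image_loss, and the batch-based reduction divides the total by
# sum(2 * M). The check below only uses the functions defined in this file.
if __name__ == "__main__":
    p, t, m = (torch.rand(4, 4, 4) for _ in range(3))
    loss = mse_loss(p, t, m)
    manual = torch.sum(m * (p - t) ** 2) / (2 * m.sum())
    assert torch.allclose(loss, manual)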
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    # one pass per 16-element block computes both reductions:
    # sum(in_ptr0 * (in_ptr1 - in_ptr2)^2) -> out_ptr0 (the image_loss term)
    # and 2 * sum(in_ptr0) -> in_out_ptr0 (the 2*M divisor)
    xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tmp0 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr0 + x3, tmp9, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_mul_sub_sum_0[grid(16)](buf2, arg0_1, arg1_1,
arg2_1, buf0, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0, buf2
def reduction_batch_based(image_loss, M):
divisor = torch.sum(M)
if divisor == 0:
return 0
else:
return torch.sum(image_loss) / divisor
def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
M = torch.sum(mask, (1, 2))
res = prediction - target
image_loss = torch.sum(mask * res * res, (1, 2))
return reduction(image_loss, 2 * M)
def reduction_image_based(image_loss, M):
valid = M.nonzero()
image_loss[valid] = image_loss[valid] / M[valid]
return torch.mean(image_loss)
class MSELossNew(nn.Module):
def __init__(self, reduction='batch-based'):
super().__init__()
if reduction == 'batch-based':
self.__reduction = reduction_batch_based
else:
self.__reduction = reduction_image_based
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
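# Hedged usage note (my addition): unlike the eager MSELoss above, call()
# returns the pre-reduction tensors (the per-image loss sums and the 2*M
# divisor), and this wrapper exposes only the first of them. That the final
# batch-based reduction is left to the caller is my reading of the code, not
# something stated in the source repo. CUDA is assumed.
if __name__ == "__main__":
    model = MSELossNew()
    p, t, m = (torch.rand(4, 4, 4, 4, device="cuda") for _ in range(3))
    image_loss = model(p, t, m)
    print(image_loss.shape)  # expected: torch.Size([4, 4])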
| kopetri/MIDAS_pytorch | MSELoss | false | 3,842 | [
"MIT"
] | 0 | 9e933bd241ee18b487dcd2b65c28a55d8a923292 | https://github.com/kopetri/MIDAS_pytorch/tree/9e933bd241ee18b487dcd2b65c28a55d8a923292 |
HealpixMaxPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/o6/co6ezbyvezbvynrzlatkrzkdv3tvccehgwlzayq3pl7wrc36sbap.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 4], [1, 4], [0, 0], [1, 1], False), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # takes the max over each window of four stride-4 elements, i.e. the
    # kernel_size=4 pooling window of the wrapped MaxPool1d
    xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0),
class HealpixMaxPoolNew(nn.MaxPool1d):
"""Healpix Maxpooling module
"""
def __init__(self, return_indices=False):
"""Initialization
"""
super().__init__(kernel_size=4, return_indices=return_indices)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
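# Hedged usage sketch (my addition): the compiled pooling expects a (4, 4, 4)
# contiguous tensor per the shape assert in call(); CUDA is assumed.
if __name__ == "__main__":
    pool = HealpixMaxPoolNew()
    x = torch.rand(4, 4, 4, device="cuda")
    y = pool(x)
    print(y.shape)  # expected: torch.Size([4, 1, 4])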
| phil-hawkins/deepsphere-pytorch | HealpixMaxPool | false | 16,245 | [
"MIT"
] | 99 | f23c531445b3ddf234c7e98cdadb010163051e6d | https://github.com/phil-hawkins/deepsphere-pytorch/tree/f23c531445b3ddf234c7e98cdadb010163051e6d |
mlp_5layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/el/cel3ti6ei3rprs2l5m6qs62p6md67qhlcbr3oxhxsqfmherljfbo.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/i5/ci5f4nyelvfg4yf2o65ompoikj7ejkd32vb6hqtyrgycc5eswrpx.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (256, 64), (64, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (256, 256), (256, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (128, 256), (256, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (10, 128), (128, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 1024, grid=grid(1024), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (256, 256), (1, 256), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf5, primals_7, 1024, grid=grid(1024), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (256, 128), (1, 256), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf7, primals_9, 512, grid=grid(512), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8)
del primals_11
return (buf8, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # fused in-place bias add followed by ReLU (elementwise max with 0)
    xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (256, 64), (64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256), (256, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (128, 256), (256, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (10, 128), (128, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 256), (
1, 256), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(1024)](buf3, primals_5, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (256, 256), (
1, 256), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_0[grid(1024)](buf5, primals_7, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (256, 128), (
1, 256), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_1[grid(512)](buf7, primals_9, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(
primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8)
del primals_11
return buf8, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4
class mlp_5layerNew(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super(mlp_5layerNew, self).__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 256 * width)
self.fc3 = nn.Linear(256 * width, 256 * width)
self.fc4 = nn.Linear(256 * width, 128 * width)
self.fc5 = nn.Linear(128 * width, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
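# Hedged usage sketch (my addition): with the (256, 64) fc1 weight asserted in
# call(), in_ch * in_dim * in_dim must equal 64, e.g. in_ch=4, in_dim=4 with
# the default width of 1; CUDA is assumed.
if __name__ == "__main__":
    model = mlp_5layerNew(in_ch=4, in_dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    logits = model(x)
    print(logits.shape)  # expected: torch.Size([4, 10])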
| mnmueller/auto_LiRPA | mlp_5layer | false | 7,259 | [
"BSD-3-Clause"
] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 |
CELoss | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch._utils
import torch.nn
class CELoss(nn.Module):
"""
Distilling the Knowledge in a Neural Network, NIPS2014.
https://arxiv.org/pdf/1503.02531.pdf
"""
def __init__(self, T=1, loss_weight=1.0):
super().__init__()
self.loss_weight = loss_weight
self.t = T
def forward(self, s_preds, t_preds, **kwargs):
loss = 0
for s_pred, t_pred in zip(s_preds, t_preds):
s = F.log_softmax(s_pred / self.t, dim=1)
t = F.softmax(t_pred / self.t, dim=1)
loss += torch.mean(torch.sum(-t * s, 1))
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
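# Hedged worked example (my addition): for one (s_pred, t_pred) pair the loss
# term above is the batch mean of sum_c -softmax(t/T)_c * log_softmax(s/T)_c,
# i.e. the soft cross entropy from the Hinton et al. paper cited in the
# docstring. The check below only uses names defined in this file.
if __name__ == "__main__":
    s, t = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    loss = CELoss()(s, t)
    manual = sum(torch.mean(torch.sum(-F.softmax(tp, dim=1) *
                                      F.log_softmax(sp, dim=1), 1))
                 for sp, tp in zip(s, t))
    assert torch.allclose(loss, manual)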
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.optim
import torch._utils
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    # numerically stable softmax numerator exp(x - max) over dim 1 for the
    # first (s_pred, t_pred) pair; kernels 3, 5 and 7 repeat this at offsets
    # 128, 192 and 64 for the other pairs
    xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_neg_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + x3, xmask)
tmp11 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = -tmp8
tmp12 = tl_math.exp(tmp11)
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp12 + tmp14
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp15 + tmp17
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tl_math.log(tmp21)
tmp23 = tmp10 - tmp22
tmp24 = tmp9 * tmp23
tl.store(out_ptr0 + x3, tmp24, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + (128 + x3), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + (128 + x3), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + (192 + x3), xmask)
tmp3 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (196 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + (192 + x3), xmask)
tmp3 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (196 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + (64 + x3), xmask)
tmp3 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + (64 + x3), xmask)
tmp3 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused_add_mean_mul_sum_9(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    # for each of the four per-pair loss buffers: sum over dim 1 (the four
    # strided loads), mean over the remaining 16 positions (/16.0), then
    # accumulate and scale by the loss_weight of 1.0
    RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), None)
tmp1 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), None)
tmp3 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), None)
tmp5 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), None)
tmp10 = tl.load(in_ptr1 + (r0 + 16 * r1), None)
tmp11 = tl.load(in_ptr1 + (4 + r0 + 16 * r1), None)
tmp13 = tl.load(in_ptr1 + (8 + r0 + 16 * r1), None)
tmp15 = tl.load(in_ptr1 + (12 + r0 + 16 * r1), None)
tmp20 = tl.load(in_ptr2 + (r0 + 16 * r1), None)
tmp21 = tl.load(in_ptr2 + (4 + r0 + 16 * r1), None)
tmp23 = tl.load(in_ptr2 + (8 + r0 + 16 * r1), None)
tmp25 = tl.load(in_ptr2 + (12 + r0 + 16 * r1), None)
tmp30 = tl.load(in_ptr3 + (r0 + 16 * r1), None)
tmp31 = tl.load(in_ptr3 + (4 + r0 + 16 * r1), None)
tmp33 = tl.load(in_ptr3 + (8 + r0 + 16 * r1), None)
tmp35 = tl.load(in_ptr3 + (12 + r0 + 16 * r1), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp26 = tmp24 + tmp25
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.sum(tmp27, 1)[:, None]
tmp32 = tmp30 + tmp31
tmp34 = tmp32 + tmp33
tmp36 = tmp34 + tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = tl.sum(tmp37, 1)[:, None]
tmp40 = 16.0
tmp41 = tmp9 / tmp40
tmp42 = 0.0
tmp43 = tmp41 + tmp42
tmp44 = tmp19 / tmp40
tmp45 = tmp43 + tmp44
tmp46 = tmp29 / tmp40
tmp47 = tmp45 + tmp46
tmp48 = tmp39 / tmp40
tmp49 = tmp47 + tmp48
tmp50 = 1.0
tmp51 = tmp49 * tmp50
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp51, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](arg1_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_1[grid(64)](arg0_1, buf1, 64, XBLOCK=64, num_warps
=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(64)](buf0,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = buf1
del buf1
triton_poi_fused__softmax_3[grid(64)](arg1_1, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf0
del buf0
triton_poi_fused_4[grid(64)](arg0_1, buf9, 64, XBLOCK=64, num_warps
=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(64)](buf8,
buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf12 = buf9
del buf9
triton_poi_fused__softmax_5[grid(64)](arg1_1, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = buf8
del buf8
triton_poi_fused_6[grid(64)](arg0_1, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(64)](buf12,
buf13, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = buf13
del buf13
triton_poi_fused__softmax_7[grid(64)](arg1_1, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg1_1
buf5 = buf12
del buf12
triton_poi_fused_8[grid(64)](arg0_1, buf5, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg0_1
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(64)](buf4,
buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf4
del buf5
buf11 = empty_strided_cuda((), (), torch.float32)
buf16 = buf11
del buf11
triton_per_fused_add_mean_mul_sum_9[grid(1)](buf16, buf2, buf6,
buf10, buf14, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf10
del buf14
del buf2
del buf6
return buf16,
class CELossNew(nn.Module):
"""
Distilling the Knowledge in a Neural Network, NIPS2014.
https://arxiv.org/pdf/1503.02531.pdf
"""
def __init__(self, T=1, loss_weight=1.0):
super().__init__()
self.loss_weight = loss_weight
self.t = T
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
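# Hedged usage sketch (my addition): a smoke test for the fused variant,
# assuming CUDA; shapes follow the asserts in call().
if __name__ == "__main__":
    model = CELossNew()
    s = torch.rand(4, 4, 4, 4, device="cuda")
    t = torch.rand(4, 4, 4, 4, device="cuda")
    loss = model(s, t)
    print(loss)  # a scalar tensor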
| ModelTC/EOD | CELoss | false | 14,073 | [
"Apache-2.0"
] | 196 | 164bff80486e9ae6a095a97667b365c46ceabd86 | https://github.com/ModelTC/EOD/tree/164bff80486e9ae6a095a97667b365c46ceabd86 |
VarianceNorm2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/xn/cxnrqltwvo6dabbyqkzkxc7ass6ujrtpjhjlltlyg2hxd5tvdjoa.py
# Topologically Sorted Source Nodes: [vars_1, add, sqrt, h, out], Original ATen: [aten.var, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# h => div
# out => mul
# sqrt => sqrt
# vars_1 => var
# Graph fragment:
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [2, 3]), kwargs = {correction: 1, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {})
triton_per_fused_add_div_mul_sqrt_var_0 = async_compile.triton('triton_per_fused_add_div_mul_sqrt_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_sqrt_var_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_sqrt_var_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 15.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp23 = tmp0 / tmp21
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp24, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [vars_1, add, sqrt, h, out], Original ATen: [aten.var, aten.add, aten.sqrt, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_sqrt_var_0.run(buf3, primals_1, primals_2, buf4, 16, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf4, primals_1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mul_sqrt_var_0(in_out_ptr0, in_ptr0, in_ptr1,
    out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    # per (N, C) slice: unbiased variance over the 16 spatial elements,
    # sqrt(var + 1e-5) stored in place into in_out_ptr0, then alpha * x / std
    xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 15.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp23 = tmp0 / tmp21
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp24, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mul_sqrt_var_0[grid(16)](buf3, primals_1,
primals_2, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_2
return buf4, primals_1, buf3
class VarianceNorm2dNew(nn.Module):
def __init__(self, num_features, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.alpha = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
def forward(self, input_0):
primals_2 = self.alpha
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
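# Hedged usage sketch (my addition): the fused kernel normalizes each (N, C)
# slice by the unbiased std over its 4x4 spatial extent and scales by alpha;
# shapes follow the asserts in call(), CUDA assumed.
if __name__ == "__main__":
    norm = VarianceNorm2dNew(num_features=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    y = norm(x)
    print(y.shape)  # expected: torch.Size([4, 4, 4, 4])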
| henryaddison/score_sde_pytorch | VarianceNorm2d | false | 12,488 | [
"Apache-2.0"
] | 0 | be07c3a3346bf8ceadabf6a3b436db5d5c3d0252 | https://github.com/henryaddison/score_sde_pytorch/tree/be07c3a3346bf8ceadabf6a3b436db5d5c3d0252 |
ZeroConv1d | import torch
from torch import nn
class ZeroConv1d(nn.Module):
def __init__(self, in_channel, out_channel):
super().__init__()
self.conv = nn.Conv1d(in_channel, out_channel, 1, padding=0)
self.conv.weight.data.zero_()
self.conv.bias.data.zero_()
self.scale = nn.Parameter(torch.zeros(1, out_channel, 1))
def forward(self, x):
out = self.conv(x)
out = out * torch.exp(self.scale * 3)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4}]
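# Hedged note (my addition): with the weight, bias, and scale all zeroed in
# __init__, the module outputs exactly zero at initialization (the conv output
# is all zeros and the exp(0) multiplier is 1), the usual zero-init trick from
# flow models. The check below only uses the class defined in this file.
if __name__ == "__main__":
    zc = ZeroConv1d(4, 4)
    assert torch.all(zc(torch.rand(4, 4)) == 0)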
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # adds the conv bias in place, then scales the result by exp(3 * scale)
    xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_exp_mul_0[grid(16)](buf1, primals_2,
primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4,
4), (16, 4, 1), 0), buf1
class ZeroConv1dNew(nn.Module):
def __init__(self, in_channel, out_channel):
super().__init__()
self.conv = nn.Conv1d(in_channel, out_channel, 1, padding=0)
self.conv.weight.data.zero_()
self.conv.bias.data.zero_()
self.scale = nn.Parameter(torch.zeros(1, out_channel, 1))
def forward(self, input_0):
primals_4 = self.scale
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
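# Hedged usage sketch (my addition): the compiled variant asserts a (4, 4)
# input, which call() reinterprets as a single (1, 4, 4) batch; CUDA assumed.
if __name__ == "__main__":
    zc = ZeroConv1dNew(4, 4).cuda()
    y = zc(torch.rand(4, 4, device="cuda"))
    print(y.shape, float(y.abs().sum()))  # (1, 4, 4) and 0.0 at init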
| batikim09/FloWaveNet | ZeroConv1d | false | 14,941 | [
"MIT"
] | 499 | 791f51aff530b2af4f9aa0d9fcb4af53d28a0997 | https://github.com/batikim09/FloWaveNet/tree/791f51aff530b2af4f9aa0d9fcb4af53d28a0997 |
Hardsigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/up/cupcnt2ednegkxpkhimpev2wbxmbkkih7j53vbxggg2ozvitm6ob.py
# Topologically Sorted Source Nodes: [mul, add, x], Original ATen: [aten.mul, aten.add, aten.clamp]
# Source node to ATen node mapping:
# add => add
# mul => mul
# x => clamp_max, clamp_min
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {})
triton_poi_fused_add_clamp_mul_0 = async_compile.triton('triton_poi_fused_add_clamp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.2
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add, x], Original ATen: [aten.mul, aten.add, aten.clamp]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.2
tmp2 = tmp0 * tmp1
tmp3 = 0.5
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HardsigmoidNew(nn.Module):
def __init__(self) ->None:
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
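# --- hedged usage sketch (added; not part of the upstream record) ---
# The fused kernel computes clamp(0.2 * x + 0.5, 0, 1). Note this is the
# 0.2-slope hard sigmoid, not torch.nn.Hardsigmoid (which uses slope 1/6),
# so the check below compares against the explicit formula.
def _sanity_check_hardsigmoid():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = torch.clamp(0.2 * x + 0.5, 0.0, 1.0)
    assert torch.allclose(HardsigmoidNew()(x), ref)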
| Jo951128/2021-2-MIP | Hardsigmoid | false | 2,429 | [
"MIT"
] | 0 | 511e0a38816d16fdba9631f76cf913ba51c43138 | https://github.com/Jo951128/2021-2-MIP/tree/511e0a38816d16fdba9631f76cf913ba51c43138 |
DIoU_loss | import torch
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
def Center_points(outputs, targets):
x_o = outputs[:, 0]
y_o = outputs[:, 1]
x_t = targets[:, 0]
y_t = targets[:, 1]
return x_o, y_o, x_t, y_t
class DIoU_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
Inter, Union, x_max, x_min, y_max, y_min = Interction_Union(outputs,
targets)
IoU = torch.div(Inter, Union)
C_width = x_max.sub(x_min)
C_height = y_max.sub(y_min)
C = torch.mul(C_width, C_height)
x_o, y_o, x_t, y_t = Center_points(outputs, targets)
dis = torch.add(torch.pow(x_o.sub(x_t), 2), torch.pow(y_o.sub(y_t), 2))
R_DIoU = torch.div(dis, torch.pow(C, 2))
ones = torch.ones_like(IoU)
loss = torch.add(ones.sub(IoU), R_DIoU)
zeros = torch.zeros_like(loss)
loss = torch.where(loss < 0, zeros, loss)
return torch.sum(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0(
in_ptr0, in_ptr1, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex // 4 % 4
r0 = rindex % 4
r2 = rindex // 16
r3 = rindex % 16
tmp98 = tl.load(in_ptr0 + (32 + r3 + 64 * r2), None)
tmp99 = tl.load(in_ptr1 + (32 + r3 + 64 * r2), None)
tmp103 = tl.load(in_ptr0 + (48 + r3 + 64 * r2), None)
tmp104 = tl.load(in_ptr1 + (48 + r3 + 64 * r2), None)
tmp119 = tl.load(in_ptr0 + (r3 + 64 * r2), None)
tmp120 = tl.load(in_ptr1 + (r3 + 64 * r2), None)
tmp123 = tl.load(in_ptr0 + (16 + r3 + 64 * r2), None)
tmp124 = tl.load(in_ptr1 + (16 + r3 + 64 * r2), None)
tmp0 = r1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [XBLOCK,
RBLOCK]), tmp4, other=0.0)
tmp6 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1, 1], 8, tl.int64)
tmp15 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * (-4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp16 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp17 = tmp16 * tmp7
tmp18 = tmp15 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tmp22 = 4 + r1
tmp24 = tmp22 < tmp3
tmp25 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * (4 + r1) + 64 * r2,
[XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp26 = tl.load(in_ptr0 + tl.broadcast_to(32 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp27 = tmp26 * tmp7
tmp28 = tmp25 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp22 >= tmp3
tmp33 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp34 = tl.load(in_ptr1 + tl.broadcast_to(32 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp35 = tmp34 * tmp7
tmp36 = tmp33 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp31, tmp36, tmp37)
tmp39 = tl.where(tmp24, tmp30, tmp38)
tmp40 = triton_helpers.maximum(tmp21, tmp39)
tmp41 = tmp5 - tmp8
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp4, tmp41, tmp42)
tmp44 = tmp15 - tmp17
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp43, tmp46)
tmp48 = tmp25 - tmp27
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp24, tmp48, tmp49)
tmp51 = tmp33 - tmp35
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp31, tmp51, tmp52)
tmp54 = tl.where(tmp24, tmp50, tmp53)
tmp55 = triton_helpers.minimum(tmp47, tmp54)
tmp56 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp57 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp4, other=0.0)
tmp58 = tmp57 * tmp7
tmp59 = tmp56 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp4, tmp59, tmp60)
tmp62 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp63 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * (-4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp12, other=0.0)
tmp64 = tmp63 * tmp7
tmp65 = tmp62 + tmp64
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp12, tmp65, tmp66)
tmp68 = tl.where(tmp4, tmp61, tmp67)
tmp69 = tl.load(in_ptr0 + tl.broadcast_to(16 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp70 = tl.load(in_ptr0 + tl.broadcast_to(48 + r0 + 4 * (4 + r1) + 64 *
r2, [XBLOCK, RBLOCK]), tmp24, other=0.0)
tmp71 = tmp70 * tmp7
tmp72 = tmp69 + tmp71
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp24, tmp72, tmp73)
tmp75 = tl.load(in_ptr1 + tl.broadcast_to(16 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp76 = tl.load(in_ptr1 + tl.broadcast_to(48 + r0 + 4 * r1 + 64 * r2, [
XBLOCK, RBLOCK]), tmp31, other=0.0)
tmp77 = tmp76 * tmp7
tmp78 = tmp75 + tmp77
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp31, tmp78, tmp79)
tmp81 = tl.where(tmp24, tmp74, tmp80)
tmp82 = triton_helpers.maximum(tmp68, tmp81)
tmp83 = tmp56 - tmp58
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp4, tmp83, tmp84)
tmp86 = tmp62 - tmp64
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp12, tmp86, tmp87)
tmp89 = tl.where(tmp4, tmp85, tmp88)
tmp90 = tmp69 - tmp71
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp24, tmp90, tmp91)
tmp93 = tmp75 - tmp77
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp31, tmp93, tmp94)
tmp96 = tl.where(tmp24, tmp92, tmp95)
tmp97 = triton_helpers.minimum(tmp89, tmp96)
tmp100 = tmp98 + tmp99
tmp101 = tmp40 - tmp55
tmp102 = tmp100 - tmp101
tmp105 = tmp103 + tmp104
tmp106 = tmp82 - tmp97
tmp107 = tmp105 - tmp106
tmp108 = tmp102 * tmp107
tmp109 = 0.0
tmp110 = tmp108 < tmp109
tmp111 = tl.where(tmp110, tmp109, tmp108)
tmp112 = tmp98 * tmp103
tmp113 = tmp99 * tmp104
tmp114 = tmp112 + tmp113
tmp115 = tmp114 - tmp111
tmp116 = tmp111 / tmp115
tmp117 = 1.0
tmp118 = tmp117 - tmp116
tmp121 = tmp119 - tmp120
tmp122 = tmp121 * tmp121
tmp125 = tmp123 - tmp124
tmp126 = tmp125 * tmp125
tmp127 = tmp122 + tmp126
tmp128 = tmp101 * tmp106
tmp129 = tmp128 * tmp128
tmp130 = tmp127 / tmp129
tmp131 = tmp118 + tmp130
tmp132 = tmp131 < tmp109
tmp133 = tl.where(tmp132, tmp109, tmp131)
tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
tmp136 = tl.sum(tmp134, 1)[:, None]
tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp136, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_lt_max_min_mul_ones_like_pow_sub_sum_where_zeros_like_0[
grid(1)](arg0_1, arg1_1, buf6, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf6,
def Interction_Union(outputs, targets):
width_o = outputs[:, 2]
width_t = targets[:, 2]
height_o = outputs[:, 3]
height_t = targets[:, 3]
x_max = torch.max(torch.stack((outputs[:, 0] + outputs[:, 2] / 2,
targets[:, 0] + targets[:, 2] / 2), 1), 1)[0]
x_min = torch.min(torch.stack((outputs[:, 0] - outputs[:, 2] / 2,
targets[:, 0] - targets[:, 2] / 2), 1), 1)[0]
y_max = torch.max(torch.stack((outputs[:, 1] + outputs[:, 3] / 2,
targets[:, 1] + targets[:, 3] / 2), 1), 1)[0]
y_min = torch.min(torch.stack((outputs[:, 1] - outputs[:, 3] / 2,
targets[:, 1] - targets[:, 3] / 2), 1), 1)[0]
Area_o = torch.mul(width_o, height_o)
Area_t = torch.mul(width_t, height_t)
Inter_w = torch.add(width_o, width_t).sub(x_max.sub(x_min))
Inter_t = torch.add(height_o, height_t).sub(y_max.sub(y_min))
Inter = torch.mul(Inter_w, Inter_t)
zeros = torch.zeros_like(Inter)
Inter = torch.where(Inter < 0, zeros, Inter)
Union = torch.add(Area_o, Area_t).sub(Inter)
return Inter, Union, x_max, x_min, y_max, y_min
def Center_points(outputs, targets):
x_o = outputs[:, 0]
y_o = outputs[:, 1]
x_t = targets[:, 0]
y_t = targets[:, 1]
return x_o, y_o, x_t, y_t
class DIoU_lossNew(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
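# --- hedged equivalence sketch (added; not part of the upstream record) ---
# DIoU_lossNew fuses the whole eager graph above into a single reduction
# kernel, so it should match the eager DIoU_loss class (both defined in this
# record) to floating-point tolerance. Note that this variant divides the
# center-distance penalty by the squared *area* of the enclosing box
# (torch.pow(C, 2)), not the squared diagonal as in the DIoU paper; the fused
# kernel reproduces exactly that computation (tmp128 * tmp128).
def _sanity_check_diou():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(DIoU_loss()(a, b), DIoU_lossNew()(a, b), atol=1e-4)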
| debrouchovea/ReproduceGoturn | DIoU_loss | false | 3,422 | [
"MIT"
] | 0 | d60f13c781ca612cacc17536530bbee989bdfa45 | https://github.com/debrouchovea/ReproduceGoturn/tree/d60f13c781ca612cacc17536530bbee989bdfa45 |
TauSTE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/te/cte662dz5uwvexkyn4i5k7zcpexn4oecbldxbr6yqmrjc6gzepr4.py
# Topologically Sorted Source Nodes: [gt, float_1], Original ATen: [aten.gt, aten._to_copy]
# Source node to ATen node mapping:
# float_1 => convert_element_type
# gt => gt
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0.0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {})
triton_poi_fused__to_copy_gt_0 = async_compile.triton('triton_poi_fused__to_copy_gt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_gt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gt, float_1], Original ATen: [aten.gt, aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_gt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from typing import Any
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class TauSTEFunction(torch.autograd.Function):
@staticmethod
def forward(ctx: 'Any', tau_threshold: 'float', input: 'Any') ->Any:
return (input > tau_threshold).float()
@staticmethod
def backward(ctx: 'Any', grad_output: 'Any') ->Any:
return None, F.hardtanh(grad_output)
class TauSTENew(Module):
def __init__(self, tau_threshold: 'float'=0.0) ->None:
super(TauSTENew, self).__init__()
self.tau_threshold = tau_threshold
def extra_repr(self) ->str:
return 'tau_threshold={}'.format(self.tau_threshold)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
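# --- hedged usage sketch (added; not part of the upstream record) ---
# The traced kernel bakes in the default threshold (tmp1 = 0.0), so TauSTENew
# only reproduces the thresholding faithfully for tau_threshold == 0.0, and
# the custom STE backward (hardtanh of the incoming gradient) defined on
# TauSTEFunction is not wired into call(); this path is inference-only.
def _sanity_check_tau_ste():
    x = torch.randn(4, 4, 4, 4, device='cuda')
    assert torch.equal(TauSTENew()(x), (x > 0.0).float())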
| atreyasha/spp-explainability | TauSTE | false | 6,285 | [
"MIT"
] | 1 | c959b837591cc1980d057a67f682e00b1f3e8e37 | https://github.com/atreyasha/spp-explainability/tree/c959b837591cc1980d057a67f682e00b1f3e8e37 |
BSS | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
class BSS(nn.Module):
"""
Knowledge Distillation with Adversarial Samples Supporting Decision Boundary
https://arxiv.org/pdf/1805.05532.pdf
"""
def __init__(self, T):
super(BSS, self).__init__()
self.T = T
def forward(self, attacked_out_s, attacked_out_t):
loss = F.kl_div(F.log_softmax(attacked_out_s / self.T, dim=1), F.
softmax(attacked_out_t / self.T, dim=1), reduction='batchmean')
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'T': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch._utils
from itertools import product as product
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 0.25
tmp37 = tmp35 * tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1)
](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class BSSNew(nn.Module):
"""
Knowledge Distillation with Adversarial Samples Supporting Decision Boundary
https://arxiv.org/pdf/1805.05532.pdf
"""
def __init__(self, T):
super(BSSNew, self).__init__()
self.T = T
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
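# --- hedged equivalence sketch (added; not part of the upstream record) ---
# The trace bakes T = 4 into the kernels: the 0.25 factors inside the softmax
# kernels are 1/T, and the final 0.25 is the batchmean division by batch size
# 4 (the two coincide here). BSSNew should therefore agree with the eager BSS
# above only for this configuration.
def _sanity_check_bss():
    s = torch.rand(4, 4, 4, 4, device='cuda')
    t = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(BSS(T=4)(s, t), BSSNew(T=4)(s, t), atol=1e-5)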
| Capetian/FaceX-Zoo | BSS | false | 4,972 | [
"Apache-2.0"
] | 1 | 029786c40d8aba15d891d33973de25fcd7e5399a | https://github.com/Capetian/FaceX-Zoo/tree/029786c40d8aba15d891d33973de25fcd7e5399a |
ReduceMax | import torch
class ReduceMax(torch.nn.Module):
def forward(self, inputs, mask=None):
return torch.amax(inputs, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_amax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_amax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ReduceMaxNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
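# --- hedged usage sketch (added; not part of the upstream record) ---
# The kernel reduces dim=1 of the traced (4, 4, 4, 4) input with chained
# pairwise maxima, i.e. it reproduces torch.amax(x, dim=1) exactly.
def _sanity_check_reduce_max():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.equal(ReduceMaxNew()(x), torch.amax(x, dim=1))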
| jimthompson5802/ludwig | ReduceMax | false | 3,857 | [
"Apache-2.0"
] | 0 | 8a369328a3f839d9cdb3710be315952c7891d7c0 | https://github.com/jimthompson5802/ludwig/tree/8a369328a3f839d9cdb3710be315952c7891d7c0 |
FocalLossSigmoid | import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
class FocalLossSigmoid(nn.Module):
"""
sigmoid version focal loss
"""
def __init__(self, alpha=0.25, gamma=2, size_average=False):
super(FocalLossSigmoid, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
def forward(self, inputs, targets):
inputs.size(0)
inputs.size(1)
P = torch.sigmoid(inputs)
alpha_mask = self.alpha * targets
loss_pos = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(P
) * targets * alpha_mask
loss_neg = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(1 - P) * (
1 - targets) * (1 - alpha_mask)
batch_loss = loss_neg + loss_pos
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp3 * tmp3
tmp5 = -1.0
tmp6 = tmp4 * tmp5
tmp7 = tl_math.log(tmp3)
tmp8 = tmp6 * tmp7
tmp10 = tmp2 - tmp9
tmp11 = tmp8 * tmp10
tmp12 = 0.25
tmp13 = tmp9 * tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp11 * tmp14
tmp16 = tl_math.log(tmp1)
tmp17 = tmp6 * tmp16
tmp18 = tmp17 * tmp9
tmp19 = tmp18 * tmp13
tmp20 = tmp15 + tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0[grid(1)](arg0_1,
arg1_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FocalLossSigmoidNew(nn.Module):
"""
sigmoid version focal loss
"""
def __init__(self, alpha=0.25, gamma=2, size_average=False):
super(FocalLossSigmoidNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
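# --- hedged equivalence sketch (added; not part of the upstream record) ---
# The fused kernel hard-codes the defaults: gamma = 2 shows up as the squared
# (1 - P) factor (tmp4 = tmp3 * tmp3), alpha = 0.25 as tmp12, and
# size_average=False as the plain sum; non-default constructor arguments to
# FocalLossSigmoidNew are therefore ignored. With defaults it should match
# the eager module above.
def _sanity_check_focal():
    logits = torch.randn(4, 4, 4, 4, device='cuda')
    targets = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(FocalLossSigmoid()(logits, targets),
                          FocalLossSigmoidNew()(logits, targets), atol=1e-4)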
| No43problem/SSD_Pytorch | FocalLossSigmoid | false | 14,100 | [
"MIT"
] | 163 | ddc548824bffbc83b540a68b176ee0261b133ee0 | https://github.com/No43problem/SSD_Pytorch/tree/ddc548824bffbc83b540a68b176ee0261b133ee0 |
Normalize | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_3/inductor_cache/wz/cwzcah4g5b3ejt55evsx6ffyi2xbfvkkf2onb5k7hy6quvpcd2du.py
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
# Source node to ATen node mapping:
# normalize => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(torch.nn.Module):
def __init__(self, *args, **kwargs):
super(NormalizeNew, self).__init__()
self.args = args
self.kwargs = kwargs
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
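# --- hedged usage sketch (added; not part of the upstream record) ---
# The kernel L2-normalizes along dim=1 with eps = 1e-12, i.e. it reproduces
# torch.nn.functional.normalize(x, p=2, dim=1) for the traced (4, 4, 4, 4)
# shape; the *args/**kwargs stored on NormalizeNew are never consulted.
def _sanity_check_normalize():
    import torch.nn.functional as F
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(NormalizeNew()(x), F.normalize(x, p=2, dim=1))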
| Akababa/torch2trt | Normalize | false | 18,425 | [
"MIT"
] | 2 | 03063b74a7eb40f5aac88d49be6b8b5e4e4e92d7 | https://github.com/Akababa/torch2trt/tree/03063b74a7eb40f5aac88d49be6b8b5e4e4e92d7 |
ParallelPolarizedSelfAttention | import torch
from torch import nn
class ParallelPolarizedSelfAttention(nn.Module):
def __init__(self, channel=512):
super().__init__()
self.ch_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1))
self.ch_wq = nn.Conv2d(channel, 1, kernel_size=(1, 1))
self.softmax_channel = nn.Softmax(1)
self.softmax_spatial = nn.Softmax(-1)
self.ch_wz = nn.Conv2d(channel // 2, channel, kernel_size=(1, 1))
self.ln = nn.LayerNorm(channel)
self.sigmoid = nn.Sigmoid()
self.sp_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1))
self.sp_wq = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1))
self.agp = nn.AdaptiveAvgPool2d((1, 1))
def forward(self, x):
b, c, h, w = x.size()
channel_wv = self.ch_wv(x)
channel_wq = self.ch_wq(x)
channel_wv = channel_wv.reshape(b, c // 2, -1)
channel_wq = channel_wq.reshape(b, -1, 1)
channel_wq = self.softmax_channel(channel_wq)
channel_wz = torch.matmul(channel_wv, channel_wq).unsqueeze(-1)
channel_weight = self.sigmoid(self.ln(self.ch_wz(channel_wz).
reshape(b, c, 1).permute(0, 2, 1))).permute(0, 2, 1).reshape(b,
c, 1, 1)
channel_out = channel_weight * x
spatial_wv = self.sp_wv(x)
spatial_wq = self.sp_wq(x)
spatial_wq = self.agp(spatial_wq)
spatial_wv = spatial_wv.reshape(b, c // 2, -1)
spatial_wq = spatial_wq.permute(0, 2, 3, 1).reshape(b, 1, c // 2)
spatial_wq = self.softmax_spatial(spatial_wq)
spatial_wz = torch.matmul(spatial_wq, spatial_wv)
spatial_weight = self.sigmoid(spatial_wz.reshape(b, 1, h, w))
spatial_out = spatial_weight * x
out = spatial_out + channel_out
return out
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_red_fused__softmax_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
_tmp5 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp3 = tmp0 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.maximum(_tmp5, tmp4)
_tmp5 = tl.where(rmask & xmask, tmp6, _tmp5)
tmp5 = triton_helpers.max2(_tmp5, 1)[:, None]
tmp8 = tl.load(in_ptr1 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
_tmp14 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp7 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp7 + tmp9
tmp11 = tmp10 - tmp5
tmp12 = tl_math.exp(tmp11)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = _tmp14 + tmp13
_tmp14 = tl.where(rmask & xmask, tmp15, _tmp14)
tmp14 = tl.sum(_tmp14, 1)[:, None]
tmp17 = tl.load(in_ptr1 + 0)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp16 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp19 = tmp16 + tmp18
tmp20 = tmp19 - tmp5
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp21 / tmp14
tl.store(out_ptr2 + (r1 + 4096 * x0), tmp22, rmask & xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1048576 * y1), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, None)
@triton.jit
def triton_per_fused_convolution_native_layer_norm_sigmoid_3(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel
):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (r1 + 512 * x0), None)
tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 512, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 512.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tmp21 = tmp2 - tmp10
tmp22 = tmp21 * tmp20
tmp24 = tmp22 * tmp23
tmp26 = tmp24 + tmp25
tmp27 = tl.sigmoid(tmp26)
tl.store(in_out_ptr0 + (r1 + 512 * x0), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp20, None)
tl.store(out_ptr1 + (r1 + 512 * x0), tmp27, None)
tl.store(out_ptr0 + x0, tmp10, None)
@triton.jit
def triton_red_fused_convolution_mean_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 256
x1 = xindex // 256
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
_tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * r2 + 32768 * x1), rmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = _tmp4 + tmp3
_tmp4 = tl.where(rmask, tmp5, _tmp4)
tmp4 = tl.sum(_tmp4, 1)[:, None]
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_per_fused_convolution_mean_5(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 256
x1 = xindex // 256
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * r2 + 8192 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_6(in_ptr0, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = 4096.0
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tmp6 = tmp2 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = tmp7 / tmp10
tl.store(out_ptr2 + (r1 + 256 * x0), tmp11, None)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 512
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y3 = yindex
x2 = xindex
y1 = yindex // 4096
y0 = yindex % 4096
tmp0 = tl.load(in_ptr0 + y3, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x2 + 512 * y3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr2 + (x2 + 512 * y1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp5 = tmp4 * tmp2
tmp6 = tmp3 + tmp5
tl.store(out_ptr0 + (y0 + 4096 * x2 + 2097152 * y1), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1))
assert_size_stride(primals_2, (256, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (512, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (512,), (1,))
assert_size_stride(primals_8, (512,), (1,))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (256, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_13, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_1, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 64, 64), (4096, 1, 64, 1))
buf5 = empty_strided_cuda((4, 4096, 1), (4096, 1, 1), torch.float32)
triton_red_fused__softmax_1[grid(4)](buf2, primals_5, buf5, 4, 4096,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_2[grid(1024, 4096)](buf1, primals_3,
buf6, 1024, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1)
del buf1
del primals_3
buf7 = empty_strided_cuda((4, 256, 1), (256, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 256, 4096), (
1048576, 4096, 1), 0), buf5, out=buf7)
buf8 = extern_kernels.convolution(reinterpret_tensor(buf7, (4, 256,
1, 1), (256, 1, 1, 1), 0), primals_6, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf8, (4, 512, 1, 1), (512, 1, 1, 1))
buf9 = reinterpret_tensor(buf8, (4, 512, 1, 1), (512, 1, 512, 512), 0)
del buf8
buf10 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf13 = reinterpret_tensor(buf11, (4, 1, 1), (1, 1, 1), 0)
del buf11
buf14 = empty_strided_cuda((4, 1, 512), (512, 2048, 1), torch.float32)
triton_per_fused_convolution_native_layer_norm_sigmoid_3[grid(4)](buf9,
buf13, primals_7, primals_8, primals_9, buf10, buf14, 4, 512,
num_warps=4, num_stages=1)
del primals_7
buf15 = extern_kernels.convolution(buf0, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf16 = extern_kernels.convolution(buf0, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf17 = empty_strided_cuda((4, 256, 1, 1, 32), (8192, 1, 32768,
32768, 256), torch.float32)
triton_red_fused_convolution_mean_4[grid(32768)](buf16, primals_13,
buf17, 32768, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
del primals_13
buf18 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1024, 1024),
torch.float32)
triton_per_fused_convolution_mean_5[grid(1024)](buf17, buf18, 1024,
32, XBLOCK=128, num_warps=8, num_stages=1)
del buf17
buf21 = empty_strided_cuda((4, 1, 256), (256, 256, 1), torch.float32)
triton_per_fused__softmax_6[grid(4)](buf18, buf21, 4, 256,
num_warps=2, num_stages=1)
del buf18
buf22 = reinterpret_tensor(buf16, (4, 256, 64, 64), (1048576, 4096,
64, 1), 0)
del buf16
triton_poi_fused_convolution_2[grid(1024, 4096)](buf15, primals_11,
buf22, 1024, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1)
del buf15
del primals_11
buf23 = reinterpret_tensor(buf2, (4, 1, 4096), (4096, 4096, 1), 0)
del buf2
extern_kernels.bmm(buf21, reinterpret_tensor(buf22, (4, 256, 4096),
(1048576, 4096, 1), 0), out=buf23)
buf24 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_mul_sigmoid_7[grid(16384, 512)](buf23, buf0,
buf14, buf24, 16384, 512, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
del buf14
return (buf24, buf0, primals_2, primals_4, primals_6, primals_8,
primals_9, primals_10, primals_12, buf5, reinterpret_tensor(buf7, (
4, 256, 1, 1), (256, 1, 1, 1), 0), buf9, buf10, buf13, buf21, buf23,
reinterpret_tensor(buf22, (4, 4096, 256), (1048576, 1, 4096), 0),
reinterpret_tensor(buf6, (4, 4096, 256), (1048576, 1, 4096), 0))
class ParallelPolarizedSelfAttentionNew(nn.Module):
def __init__(self, channel=512):
super().__init__()
self.ch_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1))
self.ch_wq = nn.Conv2d(channel, 1, kernel_size=(1, 1))
self.softmax_channel = nn.Softmax(1)
self.softmax_spatial = nn.Softmax(-1)
self.ch_wz = nn.Conv2d(channel // 2, channel, kernel_size=(1, 1))
self.ln = nn.LayerNorm(channel)
self.sigmoid = nn.Sigmoid()
self.sp_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1))
self.sp_wq = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1))
self.agp = nn.AdaptiveAvgPool2d((1, 1))
def forward(self, input_0):
primals_2 = self.ch_wv.weight
primals_3 = self.ch_wv.bias
primals_4 = self.ch_wq.weight
primals_5 = self.ch_wq.bias
primals_6 = self.ch_wz.weight
primals_7 = self.ch_wz.bias
primals_8 = self.ln.weight
primals_9 = self.ln.bias
primals_10 = self.sp_wv.weight
primals_11 = self.sp_wv.bias
primals_12 = self.sp_wq.weight
primals_13 = self.sp_wq.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
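# --- hedged usage sketch (added; not part of the upstream record) ---
# The trace is specialized to channel = 512 and (4, 512, 64, 64) inputs; the
# assert_size_stride guards above reject any other shape, so this version is
# not shape-polymorphic the way the eager module is. The output shape equals
# the input shape.
def _sanity_check_psa():
    m = ParallelPolarizedSelfAttentionNew(channel=512).cuda()
    x = torch.rand(4, 512, 64, 64, device='cuda')
    assert m(x).shape == x.shape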
| rushirajsherlocked/External-Attention-pytorch | ParallelPolarizedSelfAttention | false | 4,299 | [
"MIT"
] | 0 | 7d6814b2d90909adf81c62f3f8a89e30a59d6481 | https://github.com/rushirajsherlocked/External-Attention-pytorch/tree/7d6814b2d90909adf81c62f3f8a89e30a59d6481 |