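// IR dump: parameter globals and the prefill entry point for a 26-block
// transformer language model, exported with GGUF-style tensor names
// (token_embd, blk.N.attn_{q,k,v,output}, blk.N.ffn_{gate,up,down}).
// Shapes: hidden size 3200, FFN size 8640, vocabulary 32000; weights are
// f16 and norm scales f32 (dimensions consistent with a LLaMA-style ~3B
// model). Per the original Gist, the file is truncated: the listing below
// cuts off partway through @prefill_bs1, at the blk.13 parameter loads.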
module @module {
  util.global private @__auto.token_embd.weight = #stream.parameter.named<"model"::"token_embd.weight"> : tensor<32000x3200xf16>
  util.global private @__auto.blk.0.attn_norm.weight = #stream.parameter.named<"model"::"blk.0.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.0.attn_q.weight = #stream.parameter.named<"model"::"blk.0.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.0.attn_k.weight = #stream.parameter.named<"model"::"blk.0.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.0.attn_v.weight = #stream.parameter.named<"model"::"blk.0.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.0.attn_output.weight = #stream.parameter.named<"model"::"blk.0.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.0.ffn_norm.weight = #stream.parameter.named<"model"::"blk.0.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.0.ffn_gate.weight = #stream.parameter.named<"model"::"blk.0.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.0.ffn_up.weight = #stream.parameter.named<"model"::"blk.0.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.0.ffn_down.weight = #stream.parameter.named<"model"::"blk.0.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.1.attn_norm.weight = #stream.parameter.named<"model"::"blk.1.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.1.attn_q.weight = #stream.parameter.named<"model"::"blk.1.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.1.attn_k.weight = #stream.parameter.named<"model"::"blk.1.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.1.attn_v.weight = #stream.parameter.named<"model"::"blk.1.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.1.attn_output.weight = #stream.parameter.named<"model"::"blk.1.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.1.ffn_norm.weight = #stream.parameter.named<"model"::"blk.1.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.1.ffn_gate.weight = #stream.parameter.named<"model"::"blk.1.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.1.ffn_up.weight = #stream.parameter.named<"model"::"blk.1.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.1.ffn_down.weight = #stream.parameter.named<"model"::"blk.1.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.2.attn_norm.weight = #stream.parameter.named<"model"::"blk.2.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.2.attn_q.weight = #stream.parameter.named<"model"::"blk.2.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.2.attn_k.weight = #stream.parameter.named<"model"::"blk.2.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.2.attn_v.weight = #stream.parameter.named<"model"::"blk.2.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.2.attn_output.weight = #stream.parameter.named<"model"::"blk.2.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.2.ffn_norm.weight = #stream.parameter.named<"model"::"blk.2.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.2.ffn_gate.weight = #stream.parameter.named<"model"::"blk.2.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.2.ffn_up.weight = #stream.parameter.named<"model"::"blk.2.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.2.ffn_down.weight = #stream.parameter.named<"model"::"blk.2.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.3.attn_norm.weight = #stream.parameter.named<"model"::"blk.3.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.3.attn_q.weight = #stream.parameter.named<"model"::"blk.3.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.3.attn_k.weight = #stream.parameter.named<"model"::"blk.3.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.3.attn_v.weight = #stream.parameter.named<"model"::"blk.3.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.3.attn_output.weight = #stream.parameter.named<"model"::"blk.3.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.3.ffn_norm.weight = #stream.parameter.named<"model"::"blk.3.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.3.ffn_gate.weight = #stream.parameter.named<"model"::"blk.3.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.3.ffn_up.weight = #stream.parameter.named<"model"::"blk.3.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.3.ffn_down.weight = #stream.parameter.named<"model"::"blk.3.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.4.attn_norm.weight = #stream.parameter.named<"model"::"blk.4.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.4.attn_q.weight = #stream.parameter.named<"model"::"blk.4.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.4.attn_k.weight = #stream.parameter.named<"model"::"blk.4.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.4.attn_v.weight = #stream.parameter.named<"model"::"blk.4.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.4.attn_output.weight = #stream.parameter.named<"model"::"blk.4.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.4.ffn_norm.weight = #stream.parameter.named<"model"::"blk.4.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.4.ffn_gate.weight = #stream.parameter.named<"model"::"blk.4.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.4.ffn_up.weight = #stream.parameter.named<"model"::"blk.4.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.4.ffn_down.weight = #stream.parameter.named<"model"::"blk.4.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.5.attn_norm.weight = #stream.parameter.named<"model"::"blk.5.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.5.attn_q.weight = #stream.parameter.named<"model"::"blk.5.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.5.attn_k.weight = #stream.parameter.named<"model"::"blk.5.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.5.attn_v.weight = #stream.parameter.named<"model"::"blk.5.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.5.attn_output.weight = #stream.parameter.named<"model"::"blk.5.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.5.ffn_norm.weight = #stream.parameter.named<"model"::"blk.5.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.5.ffn_gate.weight = #stream.parameter.named<"model"::"blk.5.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.5.ffn_up.weight = #stream.parameter.named<"model"::"blk.5.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.5.ffn_down.weight = #stream.parameter.named<"model"::"blk.5.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.6.attn_norm.weight = #stream.parameter.named<"model"::"blk.6.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.6.attn_q.weight = #stream.parameter.named<"model"::"blk.6.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.6.attn_k.weight = #stream.parameter.named<"model"::"blk.6.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.6.attn_v.weight = #stream.parameter.named<"model"::"blk.6.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.6.attn_output.weight = #stream.parameter.named<"model"::"blk.6.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.6.ffn_norm.weight = #stream.parameter.named<"model"::"blk.6.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.6.ffn_gate.weight = #stream.parameter.named<"model"::"blk.6.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.6.ffn_up.weight = #stream.parameter.named<"model"::"blk.6.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.6.ffn_down.weight = #stream.parameter.named<"model"::"blk.6.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.7.attn_norm.weight = #stream.parameter.named<"model"::"blk.7.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.7.attn_q.weight = #stream.parameter.named<"model"::"blk.7.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.7.attn_k.weight = #stream.parameter.named<"model"::"blk.7.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.7.attn_v.weight = #stream.parameter.named<"model"::"blk.7.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.7.attn_output.weight = #stream.parameter.named<"model"::"blk.7.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.7.ffn_norm.weight = #stream.parameter.named<"model"::"blk.7.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.7.ffn_gate.weight = #stream.parameter.named<"model"::"blk.7.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.7.ffn_up.weight = #stream.parameter.named<"model"::"blk.7.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.7.ffn_down.weight = #stream.parameter.named<"model"::"blk.7.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.8.attn_norm.weight = #stream.parameter.named<"model"::"blk.8.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.8.attn_q.weight = #stream.parameter.named<"model"::"blk.8.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.8.attn_k.weight = #stream.parameter.named<"model"::"blk.8.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.8.attn_v.weight = #stream.parameter.named<"model"::"blk.8.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.8.attn_output.weight = #stream.parameter.named<"model"::"blk.8.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.8.ffn_norm.weight = #stream.parameter.named<"model"::"blk.8.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.8.ffn_gate.weight = #stream.parameter.named<"model"::"blk.8.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.8.ffn_up.weight = #stream.parameter.named<"model"::"blk.8.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.8.ffn_down.weight = #stream.parameter.named<"model"::"blk.8.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.9.attn_norm.weight = #stream.parameter.named<"model"::"blk.9.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.9.attn_q.weight = #stream.parameter.named<"model"::"blk.9.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.9.attn_k.weight = #stream.parameter.named<"model"::"blk.9.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.9.attn_v.weight = #stream.parameter.named<"model"::"blk.9.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.9.attn_output.weight = #stream.parameter.named<"model"::"blk.9.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.9.ffn_norm.weight = #stream.parameter.named<"model"::"blk.9.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.9.ffn_gate.weight = #stream.parameter.named<"model"::"blk.9.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.9.ffn_up.weight = #stream.parameter.named<"model"::"blk.9.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.9.ffn_down.weight = #stream.parameter.named<"model"::"blk.9.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.10.attn_norm.weight = #stream.parameter.named<"model"::"blk.10.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.10.attn_q.weight = #stream.parameter.named<"model"::"blk.10.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.10.attn_k.weight = #stream.parameter.named<"model"::"blk.10.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.10.attn_v.weight = #stream.parameter.named<"model"::"blk.10.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.10.attn_output.weight = #stream.parameter.named<"model"::"blk.10.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.10.ffn_norm.weight = #stream.parameter.named<"model"::"blk.10.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.10.ffn_gate.weight = #stream.parameter.named<"model"::"blk.10.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.10.ffn_up.weight = #stream.parameter.named<"model"::"blk.10.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.10.ffn_down.weight = #stream.parameter.named<"model"::"blk.10.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.11.attn_norm.weight = #stream.parameter.named<"model"::"blk.11.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.11.attn_q.weight = #stream.parameter.named<"model"::"blk.11.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.11.attn_k.weight = #stream.parameter.named<"model"::"blk.11.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.11.attn_v.weight = #stream.parameter.named<"model"::"blk.11.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.11.attn_output.weight = #stream.parameter.named<"model"::"blk.11.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.11.ffn_norm.weight = #stream.parameter.named<"model"::"blk.11.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.11.ffn_gate.weight = #stream.parameter.named<"model"::"blk.11.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.11.ffn_up.weight = #stream.parameter.named<"model"::"blk.11.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.11.ffn_down.weight = #stream.parameter.named<"model"::"blk.11.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.12.attn_norm.weight = #stream.parameter.named<"model"::"blk.12.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.12.attn_q.weight = #stream.parameter.named<"model"::"blk.12.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.12.attn_k.weight = #stream.parameter.named<"model"::"blk.12.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.12.attn_v.weight = #stream.parameter.named<"model"::"blk.12.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.12.attn_output.weight = #stream.parameter.named<"model"::"blk.12.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.12.ffn_norm.weight = #stream.parameter.named<"model"::"blk.12.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.12.ffn_gate.weight = #stream.parameter.named<"model"::"blk.12.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.12.ffn_up.weight = #stream.parameter.named<"model"::"blk.12.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.12.ffn_down.weight = #stream.parameter.named<"model"::"blk.12.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.13.attn_norm.weight = #stream.parameter.named<"model"::"blk.13.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.13.attn_q.weight = #stream.parameter.named<"model"::"blk.13.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.13.attn_k.weight = #stream.parameter.named<"model"::"blk.13.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.13.attn_v.weight = #stream.parameter.named<"model"::"blk.13.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.13.attn_output.weight = #stream.parameter.named<"model"::"blk.13.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.13.ffn_norm.weight = #stream.parameter.named<"model"::"blk.13.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.13.ffn_gate.weight = #stream.parameter.named<"model"::"blk.13.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.13.ffn_up.weight = #stream.parameter.named<"model"::"blk.13.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.13.ffn_down.weight = #stream.parameter.named<"model"::"blk.13.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.14.attn_norm.weight = #stream.parameter.named<"model"::"blk.14.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.14.attn_q.weight = #stream.parameter.named<"model"::"blk.14.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.14.attn_k.weight = #stream.parameter.named<"model"::"blk.14.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.14.attn_v.weight = #stream.parameter.named<"model"::"blk.14.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.14.attn_output.weight = #stream.parameter.named<"model"::"blk.14.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.14.ffn_norm.weight = #stream.parameter.named<"model"::"blk.14.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.14.ffn_gate.weight = #stream.parameter.named<"model"::"blk.14.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.14.ffn_up.weight = #stream.parameter.named<"model"::"blk.14.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.14.ffn_down.weight = #stream.parameter.named<"model"::"blk.14.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.15.attn_norm.weight = #stream.parameter.named<"model"::"blk.15.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.15.attn_q.weight = #stream.parameter.named<"model"::"blk.15.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.15.attn_k.weight = #stream.parameter.named<"model"::"blk.15.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.15.attn_v.weight = #stream.parameter.named<"model"::"blk.15.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.15.attn_output.weight = #stream.parameter.named<"model"::"blk.15.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.15.ffn_norm.weight = #stream.parameter.named<"model"::"blk.15.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.15.ffn_gate.weight = #stream.parameter.named<"model"::"blk.15.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.15.ffn_up.weight = #stream.parameter.named<"model"::"blk.15.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.15.ffn_down.weight = #stream.parameter.named<"model"::"blk.15.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.16.attn_norm.weight = #stream.parameter.named<"model"::"blk.16.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.16.attn_q.weight = #stream.parameter.named<"model"::"blk.16.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.16.attn_k.weight = #stream.parameter.named<"model"::"blk.16.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.16.attn_v.weight = #stream.parameter.named<"model"::"blk.16.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.16.attn_output.weight = #stream.parameter.named<"model"::"blk.16.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.16.ffn_norm.weight = #stream.parameter.named<"model"::"blk.16.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.16.ffn_gate.weight = #stream.parameter.named<"model"::"blk.16.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.16.ffn_up.weight = #stream.parameter.named<"model"::"blk.16.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.16.ffn_down.weight = #stream.parameter.named<"model"::"blk.16.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.17.attn_norm.weight = #stream.parameter.named<"model"::"blk.17.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.17.attn_q.weight = #stream.parameter.named<"model"::"blk.17.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.17.attn_k.weight = #stream.parameter.named<"model"::"blk.17.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.17.attn_v.weight = #stream.parameter.named<"model"::"blk.17.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.17.attn_output.weight = #stream.parameter.named<"model"::"blk.17.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.17.ffn_norm.weight = #stream.parameter.named<"model"::"blk.17.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.17.ffn_gate.weight = #stream.parameter.named<"model"::"blk.17.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.17.ffn_up.weight = #stream.parameter.named<"model"::"blk.17.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.17.ffn_down.weight = #stream.parameter.named<"model"::"blk.17.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.18.attn_norm.weight = #stream.parameter.named<"model"::"blk.18.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.18.attn_q.weight = #stream.parameter.named<"model"::"blk.18.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.18.attn_k.weight = #stream.parameter.named<"model"::"blk.18.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.18.attn_v.weight = #stream.parameter.named<"model"::"blk.18.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.18.attn_output.weight = #stream.parameter.named<"model"::"blk.18.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.18.ffn_norm.weight = #stream.parameter.named<"model"::"blk.18.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.18.ffn_gate.weight = #stream.parameter.named<"model"::"blk.18.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.18.ffn_up.weight = #stream.parameter.named<"model"::"blk.18.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.18.ffn_down.weight = #stream.parameter.named<"model"::"blk.18.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.19.attn_norm.weight = #stream.parameter.named<"model"::"blk.19.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.19.attn_q.weight = #stream.parameter.named<"model"::"blk.19.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.19.attn_k.weight = #stream.parameter.named<"model"::"blk.19.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.19.attn_v.weight = #stream.parameter.named<"model"::"blk.19.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.19.attn_output.weight = #stream.parameter.named<"model"::"blk.19.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.19.ffn_norm.weight = #stream.parameter.named<"model"::"blk.19.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.19.ffn_gate.weight = #stream.parameter.named<"model"::"blk.19.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.19.ffn_up.weight = #stream.parameter.named<"model"::"blk.19.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.19.ffn_down.weight = #stream.parameter.named<"model"::"blk.19.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.20.attn_norm.weight = #stream.parameter.named<"model"::"blk.20.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.20.attn_q.weight = #stream.parameter.named<"model"::"blk.20.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.20.attn_k.weight = #stream.parameter.named<"model"::"blk.20.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.20.attn_v.weight = #stream.parameter.named<"model"::"blk.20.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.20.attn_output.weight = #stream.parameter.named<"model"::"blk.20.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.20.ffn_norm.weight = #stream.parameter.named<"model"::"blk.20.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.20.ffn_gate.weight = #stream.parameter.named<"model"::"blk.20.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.20.ffn_up.weight = #stream.parameter.named<"model"::"blk.20.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.20.ffn_down.weight = #stream.parameter.named<"model"::"blk.20.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.21.attn_norm.weight = #stream.parameter.named<"model"::"blk.21.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.21.attn_q.weight = #stream.parameter.named<"model"::"blk.21.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.21.attn_k.weight = #stream.parameter.named<"model"::"blk.21.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.21.attn_v.weight = #stream.parameter.named<"model"::"blk.21.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.21.attn_output.weight = #stream.parameter.named<"model"::"blk.21.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.21.ffn_norm.weight = #stream.parameter.named<"model"::"blk.21.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.21.ffn_gate.weight = #stream.parameter.named<"model"::"blk.21.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.21.ffn_up.weight = #stream.parameter.named<"model"::"blk.21.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.21.ffn_down.weight = #stream.parameter.named<"model"::"blk.21.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.22.attn_norm.weight = #stream.parameter.named<"model"::"blk.22.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.22.attn_q.weight = #stream.parameter.named<"model"::"blk.22.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.22.attn_k.weight = #stream.parameter.named<"model"::"blk.22.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.22.attn_v.weight = #stream.parameter.named<"model"::"blk.22.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.22.attn_output.weight = #stream.parameter.named<"model"::"blk.22.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.22.ffn_norm.weight = #stream.parameter.named<"model"::"blk.22.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.22.ffn_gate.weight = #stream.parameter.named<"model"::"blk.22.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.22.ffn_up.weight = #stream.parameter.named<"model"::"blk.22.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.22.ffn_down.weight = #stream.parameter.named<"model"::"blk.22.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.23.attn_norm.weight = #stream.parameter.named<"model"::"blk.23.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.23.attn_q.weight = #stream.parameter.named<"model"::"blk.23.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.23.attn_k.weight = #stream.parameter.named<"model"::"blk.23.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.23.attn_v.weight = #stream.parameter.named<"model"::"blk.23.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.23.attn_output.weight = #stream.parameter.named<"model"::"blk.23.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.23.ffn_norm.weight = #stream.parameter.named<"model"::"blk.23.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.23.ffn_gate.weight = #stream.parameter.named<"model"::"blk.23.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.23.ffn_up.weight = #stream.parameter.named<"model"::"blk.23.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.23.ffn_down.weight = #stream.parameter.named<"model"::"blk.23.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.24.attn_norm.weight = #stream.parameter.named<"model"::"blk.24.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.24.attn_q.weight = #stream.parameter.named<"model"::"blk.24.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.24.attn_k.weight = #stream.parameter.named<"model"::"blk.24.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.24.attn_v.weight = #stream.parameter.named<"model"::"blk.24.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.24.attn_output.weight = #stream.parameter.named<"model"::"blk.24.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.24.ffn_norm.weight = #stream.parameter.named<"model"::"blk.24.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.24.ffn_gate.weight = #stream.parameter.named<"model"::"blk.24.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.24.ffn_up.weight = #stream.parameter.named<"model"::"blk.24.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.24.ffn_down.weight = #stream.parameter.named<"model"::"blk.24.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.blk.25.attn_norm.weight = #stream.parameter.named<"model"::"blk.25.attn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.25.attn_q.weight = #stream.parameter.named<"model"::"blk.25.attn_q.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.25.attn_k.weight = #stream.parameter.named<"model"::"blk.25.attn_k.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.25.attn_v.weight = #stream.parameter.named<"model"::"blk.25.attn_v.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.25.attn_output.weight = #stream.parameter.named<"model"::"blk.25.attn_output.weight"> : tensor<3200x3200xf16>
  util.global private @__auto.blk.25.ffn_norm.weight = #stream.parameter.named<"model"::"blk.25.ffn_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.blk.25.ffn_gate.weight = #stream.parameter.named<"model"::"blk.25.ffn_gate.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.25.ffn_up.weight = #stream.parameter.named<"model"::"blk.25.ffn_up.weight"> : tensor<8640x3200xf16>
  util.global private @__auto.blk.25.ffn_down.weight = #stream.parameter.named<"model"::"blk.25.ffn_down.weight"> : tensor<3200x8640xf16>
  util.global private @__auto.output_norm.weight = #stream.parameter.named<"model"::"output_norm.weight"> : tensor<3200xf32>
  util.global private @__auto.output.weight = #stream.parameter.named<"model"::"output.weight"> : tensor<32000x3200xf16>
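  // Prefill entry point, batch size 1. Reading the signature: %arg0 carries
  // the token ids ([1,?] si64), %arg1 the sequence length ([1] si64), %arg2
  // per-sequence page/block ids ([1,?] si64), and %arg3 is presumably the
  // paged KV-cache buffer ([?,2662400] f16); the result is the logit tensor
  // ([1,?,32000] f16). The body begins by loading every parameter global and
  // bridging it into the torch dialect with torch_c.from_builtin_tensor.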
  func.func @prefill_bs1(%arg0: !torch.vtensor<[1,?],si64> {iree.abi.affinity = #hal.device.promise<@__device_0>}, %arg1: !torch.vtensor<[1],si64> {iree.abi.affinity = #hal.device.promise<@__device_0>}, %arg2: !torch.vtensor<[1,?],si64> {iree.abi.affinity = #hal.device.promise<@__device_0>}, %arg3: !torch.tensor<[?,2662400],f16>) -> !torch.vtensor<[1,?,32000],f16> attributes {torch.assume_strict_symbolic_shapes} {
    %__auto.token_embd.weight = util.global.load @__auto.token_embd.weight : tensor<32000x3200xf16>
    %0 = torch_c.from_builtin_tensor %__auto.token_embd.weight : tensor<32000x3200xf16> -> !torch.vtensor<[32000,3200],f16>
    %__auto.blk.0.attn_norm.weight = util.global.load @__auto.blk.0.attn_norm.weight : tensor<3200xf32>
    %1 = torch_c.from_builtin_tensor %__auto.blk.0.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.0.attn_q.weight = util.global.load @__auto.blk.0.attn_q.weight : tensor<3200x3200xf16>
    %2 = torch_c.from_builtin_tensor %__auto.blk.0.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.0.attn_k.weight = util.global.load @__auto.blk.0.attn_k.weight : tensor<3200x3200xf16>
    %3 = torch_c.from_builtin_tensor %__auto.blk.0.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.0.attn_v.weight = util.global.load @__auto.blk.0.attn_v.weight : tensor<3200x3200xf16>
    %4 = torch_c.from_builtin_tensor %__auto.blk.0.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.0.attn_output.weight = util.global.load @__auto.blk.0.attn_output.weight : tensor<3200x3200xf16>
    %5 = torch_c.from_builtin_tensor %__auto.blk.0.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.0.ffn_norm.weight = util.global.load @__auto.blk.0.ffn_norm.weight : tensor<3200xf32>
    %6 = torch_c.from_builtin_tensor %__auto.blk.0.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.0.ffn_gate.weight = util.global.load @__auto.blk.0.ffn_gate.weight : tensor<8640x3200xf16>
    %7 = torch_c.from_builtin_tensor %__auto.blk.0.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.0.ffn_up.weight = util.global.load @__auto.blk.0.ffn_up.weight : tensor<8640x3200xf16>
    %8 = torch_c.from_builtin_tensor %__auto.blk.0.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.0.ffn_down.weight = util.global.load @__auto.blk.0.ffn_down.weight : tensor<3200x8640xf16>
    %9 = torch_c.from_builtin_tensor %__auto.blk.0.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.1.attn_norm.weight = util.global.load @__auto.blk.1.attn_norm.weight : tensor<3200xf32>
    %10 = torch_c.from_builtin_tensor %__auto.blk.1.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.1.attn_q.weight = util.global.load @__auto.blk.1.attn_q.weight : tensor<3200x3200xf16>
    %11 = torch_c.from_builtin_tensor %__auto.blk.1.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.1.attn_k.weight = util.global.load @__auto.blk.1.attn_k.weight : tensor<3200x3200xf16>
    %12 = torch_c.from_builtin_tensor %__auto.blk.1.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.1.attn_v.weight = util.global.load @__auto.blk.1.attn_v.weight : tensor<3200x3200xf16>
    %13 = torch_c.from_builtin_tensor %__auto.blk.1.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.1.attn_output.weight = util.global.load @__auto.blk.1.attn_output.weight : tensor<3200x3200xf16>
    %14 = torch_c.from_builtin_tensor %__auto.blk.1.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.1.ffn_norm.weight = util.global.load @__auto.blk.1.ffn_norm.weight : tensor<3200xf32>
    %15 = torch_c.from_builtin_tensor %__auto.blk.1.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.1.ffn_gate.weight = util.global.load @__auto.blk.1.ffn_gate.weight : tensor<8640x3200xf16>
    %16 = torch_c.from_builtin_tensor %__auto.blk.1.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.1.ffn_up.weight = util.global.load @__auto.blk.1.ffn_up.weight : tensor<8640x3200xf16>
    %17 = torch_c.from_builtin_tensor %__auto.blk.1.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.1.ffn_down.weight = util.global.load @__auto.blk.1.ffn_down.weight : tensor<3200x8640xf16>
    %18 = torch_c.from_builtin_tensor %__auto.blk.1.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.2.attn_norm.weight = util.global.load @__auto.blk.2.attn_norm.weight : tensor<3200xf32>
    %19 = torch_c.from_builtin_tensor %__auto.blk.2.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.2.attn_q.weight = util.global.load @__auto.blk.2.attn_q.weight : tensor<3200x3200xf16>
    %20 = torch_c.from_builtin_tensor %__auto.blk.2.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.2.attn_k.weight = util.global.load @__auto.blk.2.attn_k.weight : tensor<3200x3200xf16>
    %21 = torch_c.from_builtin_tensor %__auto.blk.2.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.2.attn_v.weight = util.global.load @__auto.blk.2.attn_v.weight : tensor<3200x3200xf16>
    %22 = torch_c.from_builtin_tensor %__auto.blk.2.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.2.attn_output.weight = util.global.load @__auto.blk.2.attn_output.weight : tensor<3200x3200xf16>
    %23 = torch_c.from_builtin_tensor %__auto.blk.2.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.2.ffn_norm.weight = util.global.load @__auto.blk.2.ffn_norm.weight : tensor<3200xf32>
    %24 = torch_c.from_builtin_tensor %__auto.blk.2.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.2.ffn_gate.weight = util.global.load @__auto.blk.2.ffn_gate.weight : tensor<8640x3200xf16>
    %25 = torch_c.from_builtin_tensor %__auto.blk.2.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.2.ffn_up.weight = util.global.load @__auto.blk.2.ffn_up.weight : tensor<8640x3200xf16>
    %26 = torch_c.from_builtin_tensor %__auto.blk.2.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.2.ffn_down.weight = util.global.load @__auto.blk.2.ffn_down.weight : tensor<3200x8640xf16>
    %27 = torch_c.from_builtin_tensor %__auto.blk.2.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.3.attn_norm.weight = util.global.load @__auto.blk.3.attn_norm.weight : tensor<3200xf32>
    %28 = torch_c.from_builtin_tensor %__auto.blk.3.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.3.attn_q.weight = util.global.load @__auto.blk.3.attn_q.weight : tensor<3200x3200xf16>
    %29 = torch_c.from_builtin_tensor %__auto.blk.3.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.3.attn_k.weight = util.global.load @__auto.blk.3.attn_k.weight : tensor<3200x3200xf16>
    %30 = torch_c.from_builtin_tensor %__auto.blk.3.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.3.attn_v.weight = util.global.load @__auto.blk.3.attn_v.weight : tensor<3200x3200xf16>
    %31 = torch_c.from_builtin_tensor %__auto.blk.3.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.3.attn_output.weight = util.global.load @__auto.blk.3.attn_output.weight : tensor<3200x3200xf16>
    %32 = torch_c.from_builtin_tensor %__auto.blk.3.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.3.ffn_norm.weight = util.global.load @__auto.blk.3.ffn_norm.weight : tensor<3200xf32>
    %33 = torch_c.from_builtin_tensor %__auto.blk.3.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.3.ffn_gate.weight = util.global.load @__auto.blk.3.ffn_gate.weight : tensor<8640x3200xf16>
    %34 = torch_c.from_builtin_tensor %__auto.blk.3.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.3.ffn_up.weight = util.global.load @__auto.blk.3.ffn_up.weight : tensor<8640x3200xf16>
    %35 = torch_c.from_builtin_tensor %__auto.blk.3.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.3.ffn_down.weight = util.global.load @__auto.blk.3.ffn_down.weight : tensor<3200x8640xf16>
    %36 = torch_c.from_builtin_tensor %__auto.blk.3.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.4.attn_norm.weight = util.global.load @__auto.blk.4.attn_norm.weight : tensor<3200xf32>
    %37 = torch_c.from_builtin_tensor %__auto.blk.4.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.4.attn_q.weight = util.global.load @__auto.blk.4.attn_q.weight : tensor<3200x3200xf16>
    %38 = torch_c.from_builtin_tensor %__auto.blk.4.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.4.attn_k.weight = util.global.load @__auto.blk.4.attn_k.weight : tensor<3200x3200xf16>
    %39 = torch_c.from_builtin_tensor %__auto.blk.4.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.4.attn_v.weight = util.global.load @__auto.blk.4.attn_v.weight : tensor<3200x3200xf16>
    %40 = torch_c.from_builtin_tensor %__auto.blk.4.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.4.attn_output.weight = util.global.load @__auto.blk.4.attn_output.weight : tensor<3200x3200xf16>
    %41 = torch_c.from_builtin_tensor %__auto.blk.4.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.4.ffn_norm.weight = util.global.load @__auto.blk.4.ffn_norm.weight : tensor<3200xf32>
    %42 = torch_c.from_builtin_tensor %__auto.blk.4.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.4.ffn_gate.weight = util.global.load @__auto.blk.4.ffn_gate.weight : tensor<8640x3200xf16>
    %43 = torch_c.from_builtin_tensor %__auto.blk.4.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.4.ffn_up.weight = util.global.load @__auto.blk.4.ffn_up.weight : tensor<8640x3200xf16>
    %44 = torch_c.from_builtin_tensor %__auto.blk.4.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.4.ffn_down.weight = util.global.load @__auto.blk.4.ffn_down.weight : tensor<3200x8640xf16>
    %45 = torch_c.from_builtin_tensor %__auto.blk.4.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.5.attn_norm.weight = util.global.load @__auto.blk.5.attn_norm.weight : tensor<3200xf32>
    %46 = torch_c.from_builtin_tensor %__auto.blk.5.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.5.attn_q.weight = util.global.load @__auto.blk.5.attn_q.weight : tensor<3200x3200xf16>
    %47 = torch_c.from_builtin_tensor %__auto.blk.5.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.5.attn_k.weight = util.global.load @__auto.blk.5.attn_k.weight : tensor<3200x3200xf16>
    %48 = torch_c.from_builtin_tensor %__auto.blk.5.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.5.attn_v.weight = util.global.load @__auto.blk.5.attn_v.weight : tensor<3200x3200xf16>
    %49 = torch_c.from_builtin_tensor %__auto.blk.5.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.5.attn_output.weight = util.global.load @__auto.blk.5.attn_output.weight : tensor<3200x3200xf16>
    %50 = torch_c.from_builtin_tensor %__auto.blk.5.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.5.ffn_norm.weight = util.global.load @__auto.blk.5.ffn_norm.weight : tensor<3200xf32>
    %51 = torch_c.from_builtin_tensor %__auto.blk.5.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.5.ffn_gate.weight = util.global.load @__auto.blk.5.ffn_gate.weight : tensor<8640x3200xf16>
    %52 = torch_c.from_builtin_tensor %__auto.blk.5.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.5.ffn_up.weight = util.global.load @__auto.blk.5.ffn_up.weight : tensor<8640x3200xf16>
    %53 = torch_c.from_builtin_tensor %__auto.blk.5.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.5.ffn_down.weight = util.global.load @__auto.blk.5.ffn_down.weight : tensor<3200x8640xf16>
    %54 = torch_c.from_builtin_tensor %__auto.blk.5.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.6.attn_norm.weight = util.global.load @__auto.blk.6.attn_norm.weight : tensor<3200xf32>
    %55 = torch_c.from_builtin_tensor %__auto.blk.6.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.6.attn_q.weight = util.global.load @__auto.blk.6.attn_q.weight : tensor<3200x3200xf16>
    %56 = torch_c.from_builtin_tensor %__auto.blk.6.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.6.attn_k.weight = util.global.load @__auto.blk.6.attn_k.weight : tensor<3200x3200xf16>
    %57 = torch_c.from_builtin_tensor %__auto.blk.6.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.6.attn_v.weight = util.global.load @__auto.blk.6.attn_v.weight : tensor<3200x3200xf16>
    %58 = torch_c.from_builtin_tensor %__auto.blk.6.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.6.attn_output.weight = util.global.load @__auto.blk.6.attn_output.weight : tensor<3200x3200xf16>
    %59 = torch_c.from_builtin_tensor %__auto.blk.6.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.6.ffn_norm.weight = util.global.load @__auto.blk.6.ffn_norm.weight : tensor<3200xf32>
    %60 = torch_c.from_builtin_tensor %__auto.blk.6.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.6.ffn_gate.weight = util.global.load @__auto.blk.6.ffn_gate.weight : tensor<8640x3200xf16>
    %61 = torch_c.from_builtin_tensor %__auto.blk.6.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.6.ffn_up.weight = util.global.load @__auto.blk.6.ffn_up.weight : tensor<8640x3200xf16>
    %62 = torch_c.from_builtin_tensor %__auto.blk.6.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.6.ffn_down.weight = util.global.load @__auto.blk.6.ffn_down.weight : tensor<3200x8640xf16>
    %63 = torch_c.from_builtin_tensor %__auto.blk.6.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.7.attn_norm.weight = util.global.load @__auto.blk.7.attn_norm.weight : tensor<3200xf32>
    %64 = torch_c.from_builtin_tensor %__auto.blk.7.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.7.attn_q.weight = util.global.load @__auto.blk.7.attn_q.weight : tensor<3200x3200xf16>
    %65 = torch_c.from_builtin_tensor %__auto.blk.7.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.7.attn_k.weight = util.global.load @__auto.blk.7.attn_k.weight : tensor<3200x3200xf16>
    %66 = torch_c.from_builtin_tensor %__auto.blk.7.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.7.attn_v.weight = util.global.load @__auto.blk.7.attn_v.weight : tensor<3200x3200xf16>
    %67 = torch_c.from_builtin_tensor %__auto.blk.7.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.7.attn_output.weight = util.global.load @__auto.blk.7.attn_output.weight : tensor<3200x3200xf16>
    %68 = torch_c.from_builtin_tensor %__auto.blk.7.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.7.ffn_norm.weight = util.global.load @__auto.blk.7.ffn_norm.weight : tensor<3200xf32>
    %69 = torch_c.from_builtin_tensor %__auto.blk.7.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.7.ffn_gate.weight = util.global.load @__auto.blk.7.ffn_gate.weight : tensor<8640x3200xf16>
    %70 = torch_c.from_builtin_tensor %__auto.blk.7.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.7.ffn_up.weight = util.global.load @__auto.blk.7.ffn_up.weight : tensor<8640x3200xf16>
    %71 = torch_c.from_builtin_tensor %__auto.blk.7.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.7.ffn_down.weight = util.global.load @__auto.blk.7.ffn_down.weight : tensor<3200x8640xf16>
    %72 = torch_c.from_builtin_tensor %__auto.blk.7.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.8.attn_norm.weight = util.global.load @__auto.blk.8.attn_norm.weight : tensor<3200xf32>
    %73 = torch_c.from_builtin_tensor %__auto.blk.8.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.8.attn_q.weight = util.global.load @__auto.blk.8.attn_q.weight : tensor<3200x3200xf16>
    %74 = torch_c.from_builtin_tensor %__auto.blk.8.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.8.attn_k.weight = util.global.load @__auto.blk.8.attn_k.weight : tensor<3200x3200xf16>
    %75 = torch_c.from_builtin_tensor %__auto.blk.8.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.8.attn_v.weight = util.global.load @__auto.blk.8.attn_v.weight : tensor<3200x3200xf16>
    %76 = torch_c.from_builtin_tensor %__auto.blk.8.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.8.attn_output.weight = util.global.load @__auto.blk.8.attn_output.weight : tensor<3200x3200xf16>
    %77 = torch_c.from_builtin_tensor %__auto.blk.8.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.8.ffn_norm.weight = util.global.load @__auto.blk.8.ffn_norm.weight : tensor<3200xf32>
    %78 = torch_c.from_builtin_tensor %__auto.blk.8.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.8.ffn_gate.weight = util.global.load @__auto.blk.8.ffn_gate.weight : tensor<8640x3200xf16>
    %79 = torch_c.from_builtin_tensor %__auto.blk.8.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.8.ffn_up.weight = util.global.load @__auto.blk.8.ffn_up.weight : tensor<8640x3200xf16>
    %80 = torch_c.from_builtin_tensor %__auto.blk.8.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.8.ffn_down.weight = util.global.load @__auto.blk.8.ffn_down.weight : tensor<3200x8640xf16>
    %81 = torch_c.from_builtin_tensor %__auto.blk.8.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.9.attn_norm.weight = util.global.load @__auto.blk.9.attn_norm.weight : tensor<3200xf32>
    %82 = torch_c.from_builtin_tensor %__auto.blk.9.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.9.attn_q.weight = util.global.load @__auto.blk.9.attn_q.weight : tensor<3200x3200xf16>
    %83 = torch_c.from_builtin_tensor %__auto.blk.9.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.9.attn_k.weight = util.global.load @__auto.blk.9.attn_k.weight : tensor<3200x3200xf16>
    %84 = torch_c.from_builtin_tensor %__auto.blk.9.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.9.attn_v.weight = util.global.load @__auto.blk.9.attn_v.weight : tensor<3200x3200xf16>
    %85 = torch_c.from_builtin_tensor %__auto.blk.9.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.9.attn_output.weight = util.global.load @__auto.blk.9.attn_output.weight : tensor<3200x3200xf16>
    %86 = torch_c.from_builtin_tensor %__auto.blk.9.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.9.ffn_norm.weight = util.global.load @__auto.blk.9.ffn_norm.weight : tensor<3200xf32>
    %87 = torch_c.from_builtin_tensor %__auto.blk.9.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.9.ffn_gate.weight = util.global.load @__auto.blk.9.ffn_gate.weight : tensor<8640x3200xf16>
    %88 = torch_c.from_builtin_tensor %__auto.blk.9.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.9.ffn_up.weight = util.global.load @__auto.blk.9.ffn_up.weight : tensor<8640x3200xf16>
    %89 = torch_c.from_builtin_tensor %__auto.blk.9.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.9.ffn_down.weight = util.global.load @__auto.blk.9.ffn_down.weight : tensor<3200x8640xf16>
    %90 = torch_c.from_builtin_tensor %__auto.blk.9.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.10.attn_norm.weight = util.global.load @__auto.blk.10.attn_norm.weight : tensor<3200xf32>
    %91 = torch_c.from_builtin_tensor %__auto.blk.10.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.10.attn_q.weight = util.global.load @__auto.blk.10.attn_q.weight : tensor<3200x3200xf16>
    %92 = torch_c.from_builtin_tensor %__auto.blk.10.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.10.attn_k.weight = util.global.load @__auto.blk.10.attn_k.weight : tensor<3200x3200xf16>
    %93 = torch_c.from_builtin_tensor %__auto.blk.10.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.10.attn_v.weight = util.global.load @__auto.blk.10.attn_v.weight : tensor<3200x3200xf16>
    %94 = torch_c.from_builtin_tensor %__auto.blk.10.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.10.attn_output.weight = util.global.load @__auto.blk.10.attn_output.weight : tensor<3200x3200xf16>
    %95 = torch_c.from_builtin_tensor %__auto.blk.10.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.10.ffn_norm.weight = util.global.load @__auto.blk.10.ffn_norm.weight : tensor<3200xf32>
    %96 = torch_c.from_builtin_tensor %__auto.blk.10.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.10.ffn_gate.weight = util.global.load @__auto.blk.10.ffn_gate.weight : tensor<8640x3200xf16>
    %97 = torch_c.from_builtin_tensor %__auto.blk.10.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.10.ffn_up.weight = util.global.load @__auto.blk.10.ffn_up.weight : tensor<8640x3200xf16>
    %98 = torch_c.from_builtin_tensor %__auto.blk.10.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.10.ffn_down.weight = util.global.load @__auto.blk.10.ffn_down.weight : tensor<3200x8640xf16>
    %99 = torch_c.from_builtin_tensor %__auto.blk.10.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.11.attn_norm.weight = util.global.load @__auto.blk.11.attn_norm.weight : tensor<3200xf32>
    %100 = torch_c.from_builtin_tensor %__auto.blk.11.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.11.attn_q.weight = util.global.load @__auto.blk.11.attn_q.weight : tensor<3200x3200xf16>
    %101 = torch_c.from_builtin_tensor %__auto.blk.11.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.11.attn_k.weight = util.global.load @__auto.blk.11.attn_k.weight : tensor<3200x3200xf16>
    %102 = torch_c.from_builtin_tensor %__auto.blk.11.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.11.attn_v.weight = util.global.load @__auto.blk.11.attn_v.weight : tensor<3200x3200xf16>
    %103 = torch_c.from_builtin_tensor %__auto.blk.11.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.11.attn_output.weight = util.global.load @__auto.blk.11.attn_output.weight : tensor<3200x3200xf16>
    %104 = torch_c.from_builtin_tensor %__auto.blk.11.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.11.ffn_norm.weight = util.global.load @__auto.blk.11.ffn_norm.weight : tensor<3200xf32>
    %105 = torch_c.from_builtin_tensor %__auto.blk.11.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.11.ffn_gate.weight = util.global.load @__auto.blk.11.ffn_gate.weight : tensor<8640x3200xf16>
    %106 = torch_c.from_builtin_tensor %__auto.blk.11.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.11.ffn_up.weight = util.global.load @__auto.blk.11.ffn_up.weight : tensor<8640x3200xf16>
    %107 = torch_c.from_builtin_tensor %__auto.blk.11.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.11.ffn_down.weight = util.global.load @__auto.blk.11.ffn_down.weight : tensor<3200x8640xf16>
    %108 = torch_c.from_builtin_tensor %__auto.blk.11.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.12.attn_norm.weight = util.global.load @__auto.blk.12.attn_norm.weight : tensor<3200xf32>
    %109 = torch_c.from_builtin_tensor %__auto.blk.12.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.12.attn_q.weight = util.global.load @__auto.blk.12.attn_q.weight : tensor<3200x3200xf16>
    %110 = torch_c.from_builtin_tensor %__auto.blk.12.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.12.attn_k.weight = util.global.load @__auto.blk.12.attn_k.weight : tensor<3200x3200xf16>
    %111 = torch_c.from_builtin_tensor %__auto.blk.12.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.12.attn_v.weight = util.global.load @__auto.blk.12.attn_v.weight : tensor<3200x3200xf16>
    %112 = torch_c.from_builtin_tensor %__auto.blk.12.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.12.attn_output.weight = util.global.load @__auto.blk.12.attn_output.weight : tensor<3200x3200xf16>
    %113 = torch_c.from_builtin_tensor %__auto.blk.12.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16>
    %__auto.blk.12.ffn_norm.weight = util.global.load @__auto.blk.12.ffn_norm.weight : tensor<3200xf32>
    %114 = torch_c.from_builtin_tensor %__auto.blk.12.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32>
    %__auto.blk.12.ffn_gate.weight = util.global.load @__auto.blk.12.ffn_gate.weight : tensor<8640x3200xf16>
    %115 = torch_c.from_builtin_tensor %__auto.blk.12.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.12.ffn_up.weight = util.global.load @__auto.blk.12.ffn_up.weight : tensor<8640x3200xf16>
    %116 = torch_c.from_builtin_tensor %__auto.blk.12.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16>
    %__auto.blk.12.ffn_down.weight = util.global.load @__auto.blk.12.ffn_down.weight : tensor<3200x8640xf16>
    %117 = torch_c.from_builtin_tensor %__auto.blk.12.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16>
    %__auto.blk.13.attn_norm.weight = util.global.load @__auto.blk.13.attn_norm.weight : tensor<3200xf32>
%118 = torch_c.from_builtin_tensor %__auto.blk.13.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.13.attn_q.weight = util.global.load @__auto.blk.13.attn_q.weight : tensor<3200x3200xf16> | |
%119 = torch_c.from_builtin_tensor %__auto.blk.13.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.13.attn_k.weight = util.global.load @__auto.blk.13.attn_k.weight : tensor<3200x3200xf16> | |
%120 = torch_c.from_builtin_tensor %__auto.blk.13.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.13.attn_v.weight = util.global.load @__auto.blk.13.attn_v.weight : tensor<3200x3200xf16> | |
%121 = torch_c.from_builtin_tensor %__auto.blk.13.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.13.attn_output.weight = util.global.load @__auto.blk.13.attn_output.weight : tensor<3200x3200xf16> | |
%122 = torch_c.from_builtin_tensor %__auto.blk.13.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.13.ffn_norm.weight = util.global.load @__auto.blk.13.ffn_norm.weight : tensor<3200xf32> | |
%123 = torch_c.from_builtin_tensor %__auto.blk.13.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.13.ffn_gate.weight = util.global.load @__auto.blk.13.ffn_gate.weight : tensor<8640x3200xf16> | |
%124 = torch_c.from_builtin_tensor %__auto.blk.13.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.13.ffn_up.weight = util.global.load @__auto.blk.13.ffn_up.weight : tensor<8640x3200xf16> | |
%125 = torch_c.from_builtin_tensor %__auto.blk.13.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.13.ffn_down.weight = util.global.load @__auto.blk.13.ffn_down.weight : tensor<3200x8640xf16> | |
%126 = torch_c.from_builtin_tensor %__auto.blk.13.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.14.attn_norm.weight = util.global.load @__auto.blk.14.attn_norm.weight : tensor<3200xf32> | |
%127 = torch_c.from_builtin_tensor %__auto.blk.14.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.14.attn_q.weight = util.global.load @__auto.blk.14.attn_q.weight : tensor<3200x3200xf16> | |
%128 = torch_c.from_builtin_tensor %__auto.blk.14.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.14.attn_k.weight = util.global.load @__auto.blk.14.attn_k.weight : tensor<3200x3200xf16> | |
%129 = torch_c.from_builtin_tensor %__auto.blk.14.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.14.attn_v.weight = util.global.load @__auto.blk.14.attn_v.weight : tensor<3200x3200xf16> | |
%130 = torch_c.from_builtin_tensor %__auto.blk.14.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.14.attn_output.weight = util.global.load @__auto.blk.14.attn_output.weight : tensor<3200x3200xf16> | |
%131 = torch_c.from_builtin_tensor %__auto.blk.14.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.14.ffn_norm.weight = util.global.load @__auto.blk.14.ffn_norm.weight : tensor<3200xf32> | |
%132 = torch_c.from_builtin_tensor %__auto.blk.14.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.14.ffn_gate.weight = util.global.load @__auto.blk.14.ffn_gate.weight : tensor<8640x3200xf16> | |
%133 = torch_c.from_builtin_tensor %__auto.blk.14.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.14.ffn_up.weight = util.global.load @__auto.blk.14.ffn_up.weight : tensor<8640x3200xf16> | |
%134 = torch_c.from_builtin_tensor %__auto.blk.14.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.14.ffn_down.weight = util.global.load @__auto.blk.14.ffn_down.weight : tensor<3200x8640xf16> | |
%135 = torch_c.from_builtin_tensor %__auto.blk.14.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.15.attn_norm.weight = util.global.load @__auto.blk.15.attn_norm.weight : tensor<3200xf32> | |
%136 = torch_c.from_builtin_tensor %__auto.blk.15.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.15.attn_q.weight = util.global.load @__auto.blk.15.attn_q.weight : tensor<3200x3200xf16> | |
%137 = torch_c.from_builtin_tensor %__auto.blk.15.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.15.attn_k.weight = util.global.load @__auto.blk.15.attn_k.weight : tensor<3200x3200xf16> | |
%138 = torch_c.from_builtin_tensor %__auto.blk.15.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.15.attn_v.weight = util.global.load @__auto.blk.15.attn_v.weight : tensor<3200x3200xf16> | |
%139 = torch_c.from_builtin_tensor %__auto.blk.15.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.15.attn_output.weight = util.global.load @__auto.blk.15.attn_output.weight : tensor<3200x3200xf16> | |
%140 = torch_c.from_builtin_tensor %__auto.blk.15.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.15.ffn_norm.weight = util.global.load @__auto.blk.15.ffn_norm.weight : tensor<3200xf32> | |
%141 = torch_c.from_builtin_tensor %__auto.blk.15.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.15.ffn_gate.weight = util.global.load @__auto.blk.15.ffn_gate.weight : tensor<8640x3200xf16> | |
%142 = torch_c.from_builtin_tensor %__auto.blk.15.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.15.ffn_up.weight = util.global.load @__auto.blk.15.ffn_up.weight : tensor<8640x3200xf16> | |
%143 = torch_c.from_builtin_tensor %__auto.blk.15.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.15.ffn_down.weight = util.global.load @__auto.blk.15.ffn_down.weight : tensor<3200x8640xf16> | |
%144 = torch_c.from_builtin_tensor %__auto.blk.15.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.16.attn_norm.weight = util.global.load @__auto.blk.16.attn_norm.weight : tensor<3200xf32> | |
%145 = torch_c.from_builtin_tensor %__auto.blk.16.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.16.attn_q.weight = util.global.load @__auto.blk.16.attn_q.weight : tensor<3200x3200xf16> | |
%146 = torch_c.from_builtin_tensor %__auto.blk.16.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.16.attn_k.weight = util.global.load @__auto.blk.16.attn_k.weight : tensor<3200x3200xf16> | |
%147 = torch_c.from_builtin_tensor %__auto.blk.16.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.16.attn_v.weight = util.global.load @__auto.blk.16.attn_v.weight : tensor<3200x3200xf16> | |
%148 = torch_c.from_builtin_tensor %__auto.blk.16.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.16.attn_output.weight = util.global.load @__auto.blk.16.attn_output.weight : tensor<3200x3200xf16> | |
%149 = torch_c.from_builtin_tensor %__auto.blk.16.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.16.ffn_norm.weight = util.global.load @__auto.blk.16.ffn_norm.weight : tensor<3200xf32> | |
%150 = torch_c.from_builtin_tensor %__auto.blk.16.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.16.ffn_gate.weight = util.global.load @__auto.blk.16.ffn_gate.weight : tensor<8640x3200xf16> | |
%151 = torch_c.from_builtin_tensor %__auto.blk.16.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.16.ffn_up.weight = util.global.load @__auto.blk.16.ffn_up.weight : tensor<8640x3200xf16> | |
%152 = torch_c.from_builtin_tensor %__auto.blk.16.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.16.ffn_down.weight = util.global.load @__auto.blk.16.ffn_down.weight : tensor<3200x8640xf16> | |
%153 = torch_c.from_builtin_tensor %__auto.blk.16.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.17.attn_norm.weight = util.global.load @__auto.blk.17.attn_norm.weight : tensor<3200xf32> | |
%154 = torch_c.from_builtin_tensor %__auto.blk.17.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.17.attn_q.weight = util.global.load @__auto.blk.17.attn_q.weight : tensor<3200x3200xf16> | |
%155 = torch_c.from_builtin_tensor %__auto.blk.17.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.17.attn_k.weight = util.global.load @__auto.blk.17.attn_k.weight : tensor<3200x3200xf16> | |
%156 = torch_c.from_builtin_tensor %__auto.blk.17.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.17.attn_v.weight = util.global.load @__auto.blk.17.attn_v.weight : tensor<3200x3200xf16> | |
%157 = torch_c.from_builtin_tensor %__auto.blk.17.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.17.attn_output.weight = util.global.load @__auto.blk.17.attn_output.weight : tensor<3200x3200xf16> | |
%158 = torch_c.from_builtin_tensor %__auto.blk.17.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.17.ffn_norm.weight = util.global.load @__auto.blk.17.ffn_norm.weight : tensor<3200xf32> | |
%159 = torch_c.from_builtin_tensor %__auto.blk.17.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.17.ffn_gate.weight = util.global.load @__auto.blk.17.ffn_gate.weight : tensor<8640x3200xf16> | |
%160 = torch_c.from_builtin_tensor %__auto.blk.17.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.17.ffn_up.weight = util.global.load @__auto.blk.17.ffn_up.weight : tensor<8640x3200xf16> | |
%161 = torch_c.from_builtin_tensor %__auto.blk.17.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.17.ffn_down.weight = util.global.load @__auto.blk.17.ffn_down.weight : tensor<3200x8640xf16> | |
%162 = torch_c.from_builtin_tensor %__auto.blk.17.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.18.attn_norm.weight = util.global.load @__auto.blk.18.attn_norm.weight : tensor<3200xf32> | |
%163 = torch_c.from_builtin_tensor %__auto.blk.18.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.18.attn_q.weight = util.global.load @__auto.blk.18.attn_q.weight : tensor<3200x3200xf16> | |
%164 = torch_c.from_builtin_tensor %__auto.blk.18.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.18.attn_k.weight = util.global.load @__auto.blk.18.attn_k.weight : tensor<3200x3200xf16> | |
%165 = torch_c.from_builtin_tensor %__auto.blk.18.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.18.attn_v.weight = util.global.load @__auto.blk.18.attn_v.weight : tensor<3200x3200xf16> | |
%166 = torch_c.from_builtin_tensor %__auto.blk.18.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.18.attn_output.weight = util.global.load @__auto.blk.18.attn_output.weight : tensor<3200x3200xf16> | |
%167 = torch_c.from_builtin_tensor %__auto.blk.18.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.18.ffn_norm.weight = util.global.load @__auto.blk.18.ffn_norm.weight : tensor<3200xf32> | |
%168 = torch_c.from_builtin_tensor %__auto.blk.18.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.18.ffn_gate.weight = util.global.load @__auto.blk.18.ffn_gate.weight : tensor<8640x3200xf16> | |
%169 = torch_c.from_builtin_tensor %__auto.blk.18.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.18.ffn_up.weight = util.global.load @__auto.blk.18.ffn_up.weight : tensor<8640x3200xf16> | |
%170 = torch_c.from_builtin_tensor %__auto.blk.18.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.18.ffn_down.weight = util.global.load @__auto.blk.18.ffn_down.weight : tensor<3200x8640xf16> | |
%171 = torch_c.from_builtin_tensor %__auto.blk.18.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.19.attn_norm.weight = util.global.load @__auto.blk.19.attn_norm.weight : tensor<3200xf32> | |
%172 = torch_c.from_builtin_tensor %__auto.blk.19.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.19.attn_q.weight = util.global.load @__auto.blk.19.attn_q.weight : tensor<3200x3200xf16> | |
%173 = torch_c.from_builtin_tensor %__auto.blk.19.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.19.attn_k.weight = util.global.load @__auto.blk.19.attn_k.weight : tensor<3200x3200xf16> | |
%174 = torch_c.from_builtin_tensor %__auto.blk.19.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.19.attn_v.weight = util.global.load @__auto.blk.19.attn_v.weight : tensor<3200x3200xf16> | |
%175 = torch_c.from_builtin_tensor %__auto.blk.19.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.19.attn_output.weight = util.global.load @__auto.blk.19.attn_output.weight : tensor<3200x3200xf16> | |
%176 = torch_c.from_builtin_tensor %__auto.blk.19.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.19.ffn_norm.weight = util.global.load @__auto.blk.19.ffn_norm.weight : tensor<3200xf32> | |
%177 = torch_c.from_builtin_tensor %__auto.blk.19.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.19.ffn_gate.weight = util.global.load @__auto.blk.19.ffn_gate.weight : tensor<8640x3200xf16> | |
%178 = torch_c.from_builtin_tensor %__auto.blk.19.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.19.ffn_up.weight = util.global.load @__auto.blk.19.ffn_up.weight : tensor<8640x3200xf16> | |
%179 = torch_c.from_builtin_tensor %__auto.blk.19.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.19.ffn_down.weight = util.global.load @__auto.blk.19.ffn_down.weight : tensor<3200x8640xf16> | |
%180 = torch_c.from_builtin_tensor %__auto.blk.19.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.20.attn_norm.weight = util.global.load @__auto.blk.20.attn_norm.weight : tensor<3200xf32> | |
%181 = torch_c.from_builtin_tensor %__auto.blk.20.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.20.attn_q.weight = util.global.load @__auto.blk.20.attn_q.weight : tensor<3200x3200xf16> | |
%182 = torch_c.from_builtin_tensor %__auto.blk.20.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.20.attn_k.weight = util.global.load @__auto.blk.20.attn_k.weight : tensor<3200x3200xf16> | |
%183 = torch_c.from_builtin_tensor %__auto.blk.20.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.20.attn_v.weight = util.global.load @__auto.blk.20.attn_v.weight : tensor<3200x3200xf16> | |
%184 = torch_c.from_builtin_tensor %__auto.blk.20.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.20.attn_output.weight = util.global.load @__auto.blk.20.attn_output.weight : tensor<3200x3200xf16> | |
%185 = torch_c.from_builtin_tensor %__auto.blk.20.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.20.ffn_norm.weight = util.global.load @__auto.blk.20.ffn_norm.weight : tensor<3200xf32> | |
%186 = torch_c.from_builtin_tensor %__auto.blk.20.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.20.ffn_gate.weight = util.global.load @__auto.blk.20.ffn_gate.weight : tensor<8640x3200xf16> | |
%187 = torch_c.from_builtin_tensor %__auto.blk.20.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.20.ffn_up.weight = util.global.load @__auto.blk.20.ffn_up.weight : tensor<8640x3200xf16> | |
%188 = torch_c.from_builtin_tensor %__auto.blk.20.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.20.ffn_down.weight = util.global.load @__auto.blk.20.ffn_down.weight : tensor<3200x8640xf16> | |
%189 = torch_c.from_builtin_tensor %__auto.blk.20.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.21.attn_norm.weight = util.global.load @__auto.blk.21.attn_norm.weight : tensor<3200xf32> | |
%190 = torch_c.from_builtin_tensor %__auto.blk.21.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.21.attn_q.weight = util.global.load @__auto.blk.21.attn_q.weight : tensor<3200x3200xf16> | |
%191 = torch_c.from_builtin_tensor %__auto.blk.21.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.21.attn_k.weight = util.global.load @__auto.blk.21.attn_k.weight : tensor<3200x3200xf16> | |
%192 = torch_c.from_builtin_tensor %__auto.blk.21.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.21.attn_v.weight = util.global.load @__auto.blk.21.attn_v.weight : tensor<3200x3200xf16> | |
%193 = torch_c.from_builtin_tensor %__auto.blk.21.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.21.attn_output.weight = util.global.load @__auto.blk.21.attn_output.weight : tensor<3200x3200xf16> | |
%194 = torch_c.from_builtin_tensor %__auto.blk.21.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.21.ffn_norm.weight = util.global.load @__auto.blk.21.ffn_norm.weight : tensor<3200xf32> | |
%195 = torch_c.from_builtin_tensor %__auto.blk.21.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.21.ffn_gate.weight = util.global.load @__auto.blk.21.ffn_gate.weight : tensor<8640x3200xf16> | |
%196 = torch_c.from_builtin_tensor %__auto.blk.21.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.21.ffn_up.weight = util.global.load @__auto.blk.21.ffn_up.weight : tensor<8640x3200xf16> | |
%197 = torch_c.from_builtin_tensor %__auto.blk.21.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.21.ffn_down.weight = util.global.load @__auto.blk.21.ffn_down.weight : tensor<3200x8640xf16> | |
%198 = torch_c.from_builtin_tensor %__auto.blk.21.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.22.attn_norm.weight = util.global.load @__auto.blk.22.attn_norm.weight : tensor<3200xf32> | |
%199 = torch_c.from_builtin_tensor %__auto.blk.22.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.22.attn_q.weight = util.global.load @__auto.blk.22.attn_q.weight : tensor<3200x3200xf16> | |
%200 = torch_c.from_builtin_tensor %__auto.blk.22.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.22.attn_k.weight = util.global.load @__auto.blk.22.attn_k.weight : tensor<3200x3200xf16> | |
%201 = torch_c.from_builtin_tensor %__auto.blk.22.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.22.attn_v.weight = util.global.load @__auto.blk.22.attn_v.weight : tensor<3200x3200xf16> | |
%202 = torch_c.from_builtin_tensor %__auto.blk.22.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.22.attn_output.weight = util.global.load @__auto.blk.22.attn_output.weight : tensor<3200x3200xf16> | |
%203 = torch_c.from_builtin_tensor %__auto.blk.22.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.22.ffn_norm.weight = util.global.load @__auto.blk.22.ffn_norm.weight : tensor<3200xf32> | |
%204 = torch_c.from_builtin_tensor %__auto.blk.22.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.22.ffn_gate.weight = util.global.load @__auto.blk.22.ffn_gate.weight : tensor<8640x3200xf16> | |
%205 = torch_c.from_builtin_tensor %__auto.blk.22.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.22.ffn_up.weight = util.global.load @__auto.blk.22.ffn_up.weight : tensor<8640x3200xf16> | |
%206 = torch_c.from_builtin_tensor %__auto.blk.22.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.22.ffn_down.weight = util.global.load @__auto.blk.22.ffn_down.weight : tensor<3200x8640xf16> | |
%207 = torch_c.from_builtin_tensor %__auto.blk.22.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.23.attn_norm.weight = util.global.load @__auto.blk.23.attn_norm.weight : tensor<3200xf32> | |
%208 = torch_c.from_builtin_tensor %__auto.blk.23.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.23.attn_q.weight = util.global.load @__auto.blk.23.attn_q.weight : tensor<3200x3200xf16> | |
%209 = torch_c.from_builtin_tensor %__auto.blk.23.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.23.attn_k.weight = util.global.load @__auto.blk.23.attn_k.weight : tensor<3200x3200xf16> | |
%210 = torch_c.from_builtin_tensor %__auto.blk.23.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.23.attn_v.weight = util.global.load @__auto.blk.23.attn_v.weight : tensor<3200x3200xf16> | |
%211 = torch_c.from_builtin_tensor %__auto.blk.23.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.23.attn_output.weight = util.global.load @__auto.blk.23.attn_output.weight : tensor<3200x3200xf16> | |
%212 = torch_c.from_builtin_tensor %__auto.blk.23.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.23.ffn_norm.weight = util.global.load @__auto.blk.23.ffn_norm.weight : tensor<3200xf32> | |
%213 = torch_c.from_builtin_tensor %__auto.blk.23.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.23.ffn_gate.weight = util.global.load @__auto.blk.23.ffn_gate.weight : tensor<8640x3200xf16> | |
%214 = torch_c.from_builtin_tensor %__auto.blk.23.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.23.ffn_up.weight = util.global.load @__auto.blk.23.ffn_up.weight : tensor<8640x3200xf16> | |
%215 = torch_c.from_builtin_tensor %__auto.blk.23.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.23.ffn_down.weight = util.global.load @__auto.blk.23.ffn_down.weight : tensor<3200x8640xf16> | |
%216 = torch_c.from_builtin_tensor %__auto.blk.23.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.24.attn_norm.weight = util.global.load @__auto.blk.24.attn_norm.weight : tensor<3200xf32> | |
%217 = torch_c.from_builtin_tensor %__auto.blk.24.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.24.attn_q.weight = util.global.load @__auto.blk.24.attn_q.weight : tensor<3200x3200xf16> | |
%218 = torch_c.from_builtin_tensor %__auto.blk.24.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.24.attn_k.weight = util.global.load @__auto.blk.24.attn_k.weight : tensor<3200x3200xf16> | |
%219 = torch_c.from_builtin_tensor %__auto.blk.24.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.24.attn_v.weight = util.global.load @__auto.blk.24.attn_v.weight : tensor<3200x3200xf16> | |
%220 = torch_c.from_builtin_tensor %__auto.blk.24.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.24.attn_output.weight = util.global.load @__auto.blk.24.attn_output.weight : tensor<3200x3200xf16> | |
%221 = torch_c.from_builtin_tensor %__auto.blk.24.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.24.ffn_norm.weight = util.global.load @__auto.blk.24.ffn_norm.weight : tensor<3200xf32> | |
%222 = torch_c.from_builtin_tensor %__auto.blk.24.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.24.ffn_gate.weight = util.global.load @__auto.blk.24.ffn_gate.weight : tensor<8640x3200xf16> | |
%223 = torch_c.from_builtin_tensor %__auto.blk.24.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.24.ffn_up.weight = util.global.load @__auto.blk.24.ffn_up.weight : tensor<8640x3200xf16> | |
%224 = torch_c.from_builtin_tensor %__auto.blk.24.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.24.ffn_down.weight = util.global.load @__auto.blk.24.ffn_down.weight : tensor<3200x8640xf16> | |
%225 = torch_c.from_builtin_tensor %__auto.blk.24.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.blk.25.attn_norm.weight = util.global.load @__auto.blk.25.attn_norm.weight : tensor<3200xf32> | |
%226 = torch_c.from_builtin_tensor %__auto.blk.25.attn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.25.attn_q.weight = util.global.load @__auto.blk.25.attn_q.weight : tensor<3200x3200xf16> | |
%227 = torch_c.from_builtin_tensor %__auto.blk.25.attn_q.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.25.attn_k.weight = util.global.load @__auto.blk.25.attn_k.weight : tensor<3200x3200xf16> | |
%228 = torch_c.from_builtin_tensor %__auto.blk.25.attn_k.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.25.attn_v.weight = util.global.load @__auto.blk.25.attn_v.weight : tensor<3200x3200xf16> | |
%229 = torch_c.from_builtin_tensor %__auto.blk.25.attn_v.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.25.attn_output.weight = util.global.load @__auto.blk.25.attn_output.weight : tensor<3200x3200xf16> | |
%230 = torch_c.from_builtin_tensor %__auto.blk.25.attn_output.weight : tensor<3200x3200xf16> -> !torch.vtensor<[3200,3200],f16> | |
%__auto.blk.25.ffn_norm.weight = util.global.load @__auto.blk.25.ffn_norm.weight : tensor<3200xf32> | |
%231 = torch_c.from_builtin_tensor %__auto.blk.25.ffn_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.blk.25.ffn_gate.weight = util.global.load @__auto.blk.25.ffn_gate.weight : tensor<8640x3200xf16> | |
%232 = torch_c.from_builtin_tensor %__auto.blk.25.ffn_gate.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.25.ffn_up.weight = util.global.load @__auto.blk.25.ffn_up.weight : tensor<8640x3200xf16> | |
%233 = torch_c.from_builtin_tensor %__auto.blk.25.ffn_up.weight : tensor<8640x3200xf16> -> !torch.vtensor<[8640,3200],f16> | |
%__auto.blk.25.ffn_down.weight = util.global.load @__auto.blk.25.ffn_down.weight : tensor<3200x8640xf16> | |
%234 = torch_c.from_builtin_tensor %__auto.blk.25.ffn_down.weight : tensor<3200x8640xf16> -> !torch.vtensor<[3200,8640],f16> | |
%__auto.output_norm.weight = util.global.load @__auto.output_norm.weight : tensor<3200xf32> | |
%235 = torch_c.from_builtin_tensor %__auto.output_norm.weight : tensor<3200xf32> -> !torch.vtensor<[3200],f32> | |
%__auto.output.weight = util.global.load @__auto.output.weight : tensor<32000x3200xf16> | |
%236 = torch_c.from_builtin_tensor %__auto.output.weight : tensor<32000x3200xf16> -> !torch.vtensor<[32000,3200],f16> | |
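// ---------------------------------------------------------------------------
// Editorial annotations: the `//` comments below were added for readability
// and are not part of the generated IR.
// End of parameter loading (blocks 9-25 plus output_norm and the output
// head). What follows is the traced prefill forward pass of a LLaMA-style
// model: hidden size 3200, 32 heads x head dim 100, vocab 32000. %arg3 is
// likely a paged KV cache: 2662400 = 26 layers * 2 (K/V) * 16 tokens per
// page * 3200, which matches the "s0 * 16" shape maps bound below.
// ---------------------------------------------------------------------------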
%237 = torch.copy.to_vtensor %arg3 : !torch.vtensor<[?,2662400],f16>
%238 = torch.symbolic_int "s1" {min_val = 2, max_val = 127} : !torch.int
%239 = torch.symbolic_int "s2" {min_val = 2, max_val = 9223372036854775806} : !torch.int
torch.bind_symbolic_shape %arg0, [%238], affine_map<()[s0] -> (1, s0 * 16)> : !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %arg2, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %237, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16>
%int1 = torch.constant.int 1
%240 = torch.aten.size.int %arg0, %int1 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.int
%int0 = torch.constant.int 0
%int1_0 = torch.constant.int 1
%none = torch.constant.none
%none_1 = torch.constant.none
%cpu = torch.constant.device "cpu"
%false = torch.constant.bool false
%241 = torch.aten.arange.start_step %int0, %240, %int1_0, %none, %none_1, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %241, [%238], affine_map<()[s0] -> (s0 * 16)> : !torch.vtensor<[?],si64>
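// %241 = arange(0, seq_len): absolute token positions. Next, positions at or
// beyond the per-sequence offset in %arg1 are flagged, forming what appears
// to be a padding/validity mask over the token axis.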
%int-1 = torch.constant.int -1
%242 = torch.aten.unsqueeze %arg1, %int-1 : !torch.vtensor<[1],si64>, !torch.int -> !torch.vtensor<[1,1],si64>
%243 = torch.aten.ge.Tensor %241, %242 : !torch.vtensor<[?],si64>, !torch.vtensor<[1,1],si64> -> !torch.vtensor<[1,?],i1>
torch.bind_symbolic_shape %243, [%238], affine_map<()[s0] -> (1, s0 * 16)> : !torch.vtensor<[1,?],i1>
%int1_2 = torch.constant.int 1
%int1_3 = torch.constant.int 1
%244 = torch.prim.ListConstruct %int1_2, %int1_3 : (!torch.int, !torch.int) -> !torch.list<int>
%int11 = torch.constant.int 11
%none_4 = torch.constant.none
%cpu_5 = torch.constant.device "cpu"
%false_6 = torch.constant.bool false
%245 = torch.aten.ones %244, %int11, %none_4, %cpu_5, %false_6 : !torch.list<int>, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[1,1],i1>
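// Causal mask: broadcast a 1x1 `true` to 2048x2048 (the max context), take
// the strict upper triangle (triu, diagonal = 1), then slice both trailing
// dims down to the actual sequence length %255.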
%int2048 = torch.constant.int 2048
%int2048_7 = torch.constant.int 2048
%246 = torch.prim.ListConstruct %int2048, %int2048_7 : (!torch.int, !torch.int) -> !torch.list<int>
%false_8 = torch.constant.bool false
%247 = torch.aten.expand %245, %246, %false_8 : !torch.vtensor<[1,1],i1>, !torch.list<int>, !torch.bool -> !torch.vtensor<[2048,2048],i1>
%int1_9 = torch.constant.int 1
%248 = torch.aten.triu %247, %int1_9 : !torch.vtensor<[2048,2048],i1>, !torch.int -> !torch.vtensor<[2048,2048],i1>
%int0_10 = torch.constant.int 0
%249 = torch.aten.unsqueeze %248, %int0_10 : !torch.vtensor<[2048,2048],i1>, !torch.int -> !torch.vtensor<[1,2048,2048],i1>
%int1_11 = torch.constant.int 1
%250 = torch.aten.unsqueeze %249, %int1_11 : !torch.vtensor<[1,2048,2048],i1>, !torch.int -> !torch.vtensor<[1,1,2048,2048],i1>
%int2 = torch.constant.int 2
%int0_12 = torch.constant.int 0
%int9223372036854775807 = torch.constant.int 9223372036854775807
%int1_13 = torch.constant.int 1
%251 = torch.aten.slice.Tensor %250, %int2, %int0_12, %int9223372036854775807, %int1_13 : !torch.vtensor<[1,1,2048,2048],i1>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,2048,2048],i1>
%int3 = torch.constant.int 3
%int0_14 = torch.constant.int 0
%int9223372036854775807_15 = torch.constant.int 9223372036854775807
%int1_16 = torch.constant.int 1
%252 = torch.aten.slice.Tensor %251, %int3, %int0_14, %int9223372036854775807_15, %int1_16 : !torch.vtensor<[1,1,2048,2048],i1>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,2048,2048],i1>
%int0_17 = torch.constant.int 0
%int0_18 = torch.constant.int 0
%int9223372036854775807_19 = torch.constant.int 9223372036854775807
%int1_20 = torch.constant.int 1
%253 = torch.aten.slice.Tensor %252, %int0_17, %int0_18, %int9223372036854775807_19, %int1_20 : !torch.vtensor<[1,1,2048,2048],i1>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,2048,2048],i1>
%int1_21 = torch.constant.int 1
%int0_22 = torch.constant.int 0
%int9223372036854775807_23 = torch.constant.int 9223372036854775807
%int1_24 = torch.constant.int 1
%254 = torch.aten.slice.Tensor %253, %int1_21, %int0_22, %int9223372036854775807_23, %int1_24 : !torch.vtensor<[1,1,2048,2048],i1>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,2048,2048],i1>
%int0_25 = torch.constant.int 0
%255 = torch.aten.size.int %241, %int0_25 : !torch.vtensor<[?],si64>, !torch.int -> !torch.int
%int2_26 = torch.constant.int 2
%int0_27 = torch.constant.int 0
%int1_28 = torch.constant.int 1
%256 = torch.aten.slice.Tensor %254, %int2_26, %int0_27, %255, %int1_28 : !torch.vtensor<[1,1,2048,2048],i1>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,?,2048],i1>
torch.bind_symbolic_shape %256, [%238], affine_map<()[s0] -> (1, 1, s0 * 16, 2048)> : !torch.vtensor<[1,1,?,2048],i1>
%int3_29 = torch.constant.int 3
%int0_30 = torch.constant.int 0
%int1_31 = torch.constant.int 1
%257 = torch.aten.slice.Tensor %256, %int3_29, %int0_30, %255, %int1_31 : !torch.vtensor<[1,1,?,2048],i1>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,?,?],i1>
torch.bind_symbolic_shape %257, [%238], affine_map<()[s0] -> (1, 1, s0 * 16, s0 * 16)> : !torch.vtensor<[1,1,?,?],i1>
%int0_32 = torch.constant.int 0
%int0_33 = torch.constant.int 0
%int9223372036854775807_34 = torch.constant.int 9223372036854775807
%int1_35 = torch.constant.int 1
%258 = torch.aten.slice.Tensor %243, %int0_32, %int0_33, %int9223372036854775807_34, %int1_35 : !torch.vtensor<[1,?],i1>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?],i1>
torch.bind_symbolic_shape %258, [%238], affine_map<()[s0] -> (1, s0 * 16)> : !torch.vtensor<[1,?],i1>
%int1_36 = torch.constant.int 1
%259 = torch.aten.unsqueeze %258, %int1_36 : !torch.vtensor<[1,?],i1>, !torch.int -> !torch.vtensor<[1,1,?],i1>
torch.bind_symbolic_shape %259, [%238], affine_map<()[s0] -> (1, 1, s0 * 16)> : !torch.vtensor<[1,1,?],i1>
%int2_37 = torch.constant.int 2
%260 = torch.aten.unsqueeze %259, %int2_37 : !torch.vtensor<[1,1,?],i1>, !torch.int -> !torch.vtensor<[1,1,1,?],i1>
torch.bind_symbolic_shape %260, [%238], affine_map<()[s0] -> (1, 1, 1, s0 * 16)> : !torch.vtensor<[1,1,1,?],i1>
%int3_38 = torch.constant.int 3
%int0_39 = torch.constant.int 0
%int9223372036854775807_40 = torch.constant.int 9223372036854775807
%int1_41 = torch.constant.int 1
%261 = torch.aten.slice.Tensor %260, %int3_38, %int0_39, %int9223372036854775807_40, %int1_41 : !torch.vtensor<[1,1,1,?],i1>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1,?],i1>
torch.bind_symbolic_shape %261, [%238], affine_map<()[s0] -> (1, 1, 1, s0 * 16)> : !torch.vtensor<[1,1,1,?],i1>
%262 = torch.aten.logical_or %257, %261 : !torch.vtensor<[1,1,?,?],i1>, !torch.vtensor<[1,1,1,?],i1> -> !torch.vtensor<[1,1,?,?],i1>
torch.bind_symbolic_shape %262, [%238], affine_map<()[s0] -> (1, 1, s0 * 16, s0 * 16)> : !torch.vtensor<[1,1,?,?],i1>
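// %262 = causal mask OR the broadcast position mask. It is now turned into
// an additive attention mask: 0.0 where attention is allowed, -inf where it
// is masked, then cast to f16 to match the activations.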
%int0_42 = torch.constant.int 0
%int6 = torch.constant.int 6
%int0_43 = torch.constant.int 0
%cpu_44 = torch.constant.device "cpu"
%none_45 = torch.constant.none
%263 = torch.aten.scalar_tensor %int0_42, %int6, %int0_43, %cpu_44, %none_45 : !torch.int, !torch.int, !torch.int, !torch.Device, !torch.none -> !torch.vtensor<[],f32>
%float-Inf = torch.constant.float 0xFFF0000000000000
%int6_46 = torch.constant.int 6
%int0_47 = torch.constant.int 0
%cpu_48 = torch.constant.device "cpu"
%none_49 = torch.constant.none
%264 = torch.aten.scalar_tensor %float-Inf, %int6_46, %int0_47, %cpu_48, %none_49 : !torch.float, !torch.int, !torch.int, !torch.Device, !torch.none -> !torch.vtensor<[],f32>
%265 = torch.aten.where.self %262, %264, %263 : !torch.vtensor<[1,1,?,?],i1>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,1,?,?],f32>
torch.bind_symbolic_shape %265, [%238], affine_map<()[s0] -> (1, 1, s0 * 16, s0 * 16)> : !torch.vtensor<[1,1,?,?],f32>
%int5 = torch.constant.int 5
%266 = torch.prims.convert_element_type %265, %int5 : !torch.vtensor<[1,1,?,?],f32>, !torch.int -> !torch.vtensor<[1,1,?,?],f16>
torch.bind_symbolic_shape %266, [%238], affine_map<()[s0] -> (1, 1, s0 * 16, s0 * 16)> : !torch.vtensor<[1,1,?,?],f16>
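// Token embedding gather from token_embd.weight (%0, f16), then upcast to
// f32 for the normalization that follows.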
%int-1_50 = torch.constant.int -1
%false_51 = torch.constant.bool false
%false_52 = torch.constant.bool false
%267 = torch.aten.embedding %0, %arg0, %int-1_50, %false_51, %false_52 : !torch.vtensor<[32000,3200],f16>, !torch.vtensor<[1,?],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %267, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int6_53 = torch.constant.int 6
%268 = torch.prims.convert_element_type %267, %int6_53 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %268, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
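// RMSNorm in f32: mean of squares over the hidden dim, add eps ~= 1e-6,
// rsqrt, scale the input, multiply by the norm weight (%1, presumably
// blk.0.attn_norm), and downcast back to f16.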
%int2_54 = torch.constant.int 2
%269 = torch.aten.pow.Tensor_Scalar %268, %int2_54 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %269, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int-1_55 = torch.constant.int -1
%270 = torch.prim.ListConstruct %int-1_55 : (!torch.int) -> !torch.list<int>
%true = torch.constant.bool true
%none_56 = torch.constant.none
%271 = torch.aten.mean.dim %269, %270, %true, %none_56 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %271, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%float9.999990e-07 = torch.constant.float 9.9999999747524271E-7
%int1_57 = torch.constant.int 1
%272 = torch.aten.add.Scalar %271, %float9.999990e-07, %int1_57 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %272, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%273 = torch.aten.rsqrt %272 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %273, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%274 = torch.aten.mul.Tensor %268, %273 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %274, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%275 = torch.aten.mul.Tensor %1, %274 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %275, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int5_58 = torch.constant.int 5
%276 = torch.prims.convert_element_type %275, %int5_58 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %276, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
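// Q, K and V projections (weights %2, %3, %4, likely blk.0.attn_{q,k,v}):
// each is x @ W^T, lowered as a transpose of the 3200x3200 weight followed
// by a matmul on the activations flattened to [seq, 3200].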
%int-2 = torch.constant.int -2
%int-1_59 = torch.constant.int -1
%277 = torch.aten.transpose.int %2, %int-2, %int-1_59 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200 = torch.constant.int 3200
%278 = torch.prim.ListConstruct %240, %int3200 : (!torch.int, !torch.int) -> !torch.list<int>
%279 = torch.aten.view %276, %278 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %279, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%280 = torch.aten.mm %279, %277 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %280, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_60 = torch.constant.int 1
%int3200_61 = torch.constant.int 3200
%281 = torch.prim.ListConstruct %int1_60, %240, %int3200_61 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%282 = torch.aten.view %280, %281 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %282, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_62 = torch.constant.int -2
%int-1_63 = torch.constant.int -1
%283 = torch.aten.transpose.int %3, %int-2_62, %int-1_63 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_64 = torch.constant.int 3200
%284 = torch.prim.ListConstruct %240, %int3200_64 : (!torch.int, !torch.int) -> !torch.list<int>
%285 = torch.aten.view %276, %284 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %285, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%286 = torch.aten.mm %285, %283 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %286, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_65 = torch.constant.int 1
%int3200_66 = torch.constant.int 3200
%287 = torch.prim.ListConstruct %int1_65, %240, %int3200_66 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%288 = torch.aten.view %286, %287 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %288, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_67 = torch.constant.int -2
%int-1_68 = torch.constant.int -1
%289 = torch.aten.transpose.int %4, %int-2_67, %int-1_68 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_69 = torch.constant.int 3200
%290 = torch.prim.ListConstruct %240, %int3200_69 : (!torch.int, !torch.int) -> !torch.list<int>
%291 = torch.aten.view %276, %290 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %291, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%292 = torch.aten.mm %291, %289 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %292, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_70 = torch.constant.int 1
%int3200_71 = torch.constant.int 3200
%293 = torch.prim.ListConstruct %int1_70, %240, %int3200_71 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%294 = torch.aten.view %292, %293 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %294, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
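// Split the Q/K/V activations into heads: [1, seq, 3200] ->
// [1, seq, 32, 100] (32 attention heads, head dim 100).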
%int1_72 = torch.constant.int 1
%int32 = torch.constant.int 32
%int100 = torch.constant.int 100
%295 = torch.prim.ListConstruct %int1_72, %240, %int32, %int100 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%296 = torch.aten.view %282, %295 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %296, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_73 = torch.constant.int 1
%int32_74 = torch.constant.int 32
%int100_75 = torch.constant.int 100
%297 = torch.prim.ListConstruct %int1_73, %240, %int32_74, %int100_75 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%298 = torch.aten.view %288, %297 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %298, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_76 = torch.constant.int 1
%int32_77 = torch.constant.int 32
%int100_78 = torch.constant.int 100
%299 = torch.prim.ListConstruct %int1_76, %240, %int32_77, %int100_78 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%300 = torch.aten.view %294, %299 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %300, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
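// Rotary position embedding (RoPE) table: inverse frequencies
// 1/10000^(2i/100) for i in [0, 50), outer product with positions 0..2047,
// then cos/sin packed into a complex<f32> rotation table, sliced to the
// current sequence length.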
%int2048_79 = torch.constant.int 2048
%none_80 = torch.constant.none
%none_81 = torch.constant.none
%cpu_82 = torch.constant.device "cpu"
%false_83 = torch.constant.bool false
%301 = torch.aten.arange %int2048_79, %none_80, %none_81, %cpu_82, %false_83 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64>
%int0_84 = torch.constant.int 0
%int100_85 = torch.constant.int 100
%int2_86 = torch.constant.int 2
%none_87 = torch.constant.none
%none_88 = torch.constant.none
%cpu_89 = torch.constant.device "cpu"
%false_90 = torch.constant.bool false
%302 = torch.aten.arange.start_step %int0_84, %int100_85, %int2_86, %none_87, %none_88, %cpu_89, %false_90 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64>
%int0_91 = torch.constant.int 0
%int0_92 = torch.constant.int 0
%int50 = torch.constant.int 50
%int1_93 = torch.constant.int 1
%303 = torch.aten.slice.Tensor %302, %int0_91, %int0_92, %int50, %int1_93 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64>
%int6_94 = torch.constant.int 6
%304 = torch.prims.convert_element_type %303, %int6_94 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32>
%int100_95 = torch.constant.int 100
%305 = torch.aten.div.Scalar %304, %int100_95 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32>
%float1.000000e04 = torch.constant.float 1.000000e+04
%306 = torch.aten.pow.Scalar %float1.000000e04, %305 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%307 = torch.aten.reciprocal %306 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%float1.000000e00 = torch.constant.float 1.000000e+00
%308 = torch.aten.mul.Scalar %307, %float1.000000e00 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32>
%int2048_96 = torch.constant.int 2048
%int1_97 = torch.constant.int 1
%309 = torch.prim.ListConstruct %int2048_96, %int1_97 : (!torch.int, !torch.int) -> !torch.list<int>
%310 = torch.aten.view %301, %309 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64>
%311 = torch.aten.mul.Tensor %310, %308 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32>
%312 = torch.aten.cos %311 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%313 = torch.aten.sin %311 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%314 = torch.aten.complex %312, %313 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>>
%int1_98 = torch.constant.int 1
%315 = torch.aten.size.int %282, %int1_98 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int0_99 = torch.constant.int 0
%316 = torch.aten.add.int %int0_99, %315 : !torch.int, !torch.int -> !torch.int
%int0_100 = torch.constant.int 0
%int0_101 = torch.constant.int 0
%int1_102 = torch.constant.int 1
%317 = torch.aten.slice.Tensor %314, %int0_100, %int0_101, %316, %int1_102 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %317, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int1_103 = torch.constant.int 1
%int0_104 = torch.constant.int 0
%int9223372036854775807_105 = torch.constant.int 9223372036854775807
%int1_106 = torch.constant.int 1
%318 = torch.aten.slice.Tensor %317, %int1_103, %int0_104, %int9223372036854775807_105, %int1_106 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %318, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int0_107 = torch.constant.int 0
%319 = torch.aten.unsqueeze %318, %int0_107 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>>
torch.bind_symbolic_shape %319, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>>
%int2_108 = torch.constant.int 2
%320 = torch.aten.unsqueeze %319, %int2_108 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %320, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%int3_109 = torch.constant.int 3
%int0_110 = torch.constant.int 0
%int9223372036854775807_111 = torch.constant.int 9223372036854775807
%int1_112 = torch.constant.int 1
%321 = torch.aten.slice.Tensor %320, %int3_109, %int0_110, %int9223372036854775807_111, %int1_112 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %321, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
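// Apply RoPE to Q: bitcast adjacent f16 pairs to complex<f16>
// ([1,?,32,100]f16 -> [1,?,32,50]complex<f16>), multiply by the complex
// rotation table (broadcast over heads), bitcast back to [1,?,32,100]f32,
// and downcast to f16.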
%322 = torch_c.to_builtin_tensor %296 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16>
%c1 = arith.constant 1 : index
%dim = tensor.dim %322, %c1 : tensor<1x?x32x100xf16>
%323 = flow.tensor.bitcast %322 : tensor<1x?x32x100xf16>{%dim} -> tensor<1x?x32x50xcomplex<f16>>{%dim}
%324 = torch_c.from_builtin_tensor %323 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>>
torch.bind_symbolic_shape %324, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>>
%325 = torch.aten.mul.Tensor %324, %321 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>>
torch.bind_symbolic_shape %325, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>>
%326 = torch_c.to_builtin_tensor %325 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>>
%c1_113 = arith.constant 1 : index
%dim_114 = tensor.dim %326, %c1_113 : tensor<1x?x32x50xcomplex<f32>>
%327 = flow.tensor.bitcast %326 : tensor<1x?x32x50xcomplex<f32>>{%dim_114} -> tensor<1x?x32x100xf32>{%dim_114}
%328 = torch_c.from_builtin_tensor %327 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32>
torch.bind_symbolic_shape %328, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32>
%int5_115 = torch.constant.int 5
%329 = torch.prims.convert_element_type %328, %int5_115 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %329, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
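// The identical rotary table is recomputed from scratch (the trace does not
// reuse %314) and applied to K in the same bitcast-multiply-bitcast fashion.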
%int2048_116 = torch.constant.int 2048
%none_117 = torch.constant.none
%none_118 = torch.constant.none
%cpu_119 = torch.constant.device "cpu"
%false_120 = torch.constant.bool false
%330 = torch.aten.arange %int2048_116, %none_117, %none_118, %cpu_119, %false_120 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64>
%int0_121 = torch.constant.int 0
%int100_122 = torch.constant.int 100
%int2_123 = torch.constant.int 2
%none_124 = torch.constant.none
%none_125 = torch.constant.none
%cpu_126 = torch.constant.device "cpu"
%false_127 = torch.constant.bool false
%331 = torch.aten.arange.start_step %int0_121, %int100_122, %int2_123, %none_124, %none_125, %cpu_126, %false_127 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64>
%int0_128 = torch.constant.int 0
%int0_129 = torch.constant.int 0
%int50_130 = torch.constant.int 50
%int1_131 = torch.constant.int 1
%332 = torch.aten.slice.Tensor %331, %int0_128, %int0_129, %int50_130, %int1_131 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64>
%int6_132 = torch.constant.int 6
%333 = torch.prims.convert_element_type %332, %int6_132 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32>
%int100_133 = torch.constant.int 100
%334 = torch.aten.div.Scalar %333, %int100_133 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32>
%float1.000000e04_134 = torch.constant.float 1.000000e+04
%335 = torch.aten.pow.Scalar %float1.000000e04_134, %334 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%336 = torch.aten.reciprocal %335 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%float1.000000e00_135 = torch.constant.float 1.000000e+00
%337 = torch.aten.mul.Scalar %336, %float1.000000e00_135 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32>
%int2048_136 = torch.constant.int 2048
%int1_137 = torch.constant.int 1
%338 = torch.prim.ListConstruct %int2048_136, %int1_137 : (!torch.int, !torch.int) -> !torch.list<int>
%339 = torch.aten.view %330, %338 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64>
%340 = torch.aten.mul.Tensor %339, %337 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32>
%341 = torch.aten.cos %340 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%342 = torch.aten.sin %340 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%343 = torch.aten.complex %341, %342 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>>
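// NOTE: the table is then sliced to the current sequence length (s0 * 16 tokens) and
// reshaped to [1, seq, 1, 50] so it broadcasts across the 32 heads.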
%int1_138 = torch.constant.int 1
%344 = torch.aten.size.int %288, %int1_138 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int0_139 = torch.constant.int 0
%345 = torch.aten.add.int %int0_139, %344 : !torch.int, !torch.int -> !torch.int
%int0_140 = torch.constant.int 0
%int0_141 = torch.constant.int 0
%int1_142 = torch.constant.int 1
%346 = torch.aten.slice.Tensor %343, %int0_140, %int0_141, %345, %int1_142 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %346, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int1_143 = torch.constant.int 1
%int0_144 = torch.constant.int 0
%int9223372036854775807_145 = torch.constant.int 9223372036854775807
%int1_146 = torch.constant.int 1
%347 = torch.aten.slice.Tensor %346, %int1_143, %int0_144, %int9223372036854775807_145, %int1_146 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %347, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int0_147 = torch.constant.int 0
%348 = torch.aten.unsqueeze %347, %int0_147 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>>
torch.bind_symbolic_shape %348, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>>
%int2_148 = torch.constant.int 2
%349 = torch.aten.unsqueeze %348, %int2_148 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %349, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%int3_149 = torch.constant.int 3
%int0_150 = torch.constant.int 0
%int9223372036854775807_151 = torch.constant.int 9223372036854775807
%int1_152 = torch.constant.int 1
%350 = torch.aten.slice.Tensor %349, %int3_149, %int0_150, %int9223372036854775807_151, %int1_152 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %350, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
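// NOTE: rotary embedding applied to the key projection (%298): adjacent f16 pairs are
// bitcast to complex<f16>, rotated by the complex frequencies, bitcast back to f32 pairs,
// and truncated to f16 (%358). The same pattern was applied to the queries above (%329).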
%351 = torch_c.to_builtin_tensor %298 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16>
%c1_153 = arith.constant 1 : index
%dim_154 = tensor.dim %351, %c1_153 : tensor<1x?x32x100xf16>
%352 = flow.tensor.bitcast %351 : tensor<1x?x32x100xf16>{%dim_154} -> tensor<1x?x32x50xcomplex<f16>>{%dim_154}
%353 = torch_c.from_builtin_tensor %352 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>>
torch.bind_symbolic_shape %353, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>>
%354 = torch.aten.mul.Tensor %353, %350 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>>
torch.bind_symbolic_shape %354, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>>
%355 = torch_c.to_builtin_tensor %354 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>>
%c1_155 = arith.constant 1 : index
%dim_156 = tensor.dim %355, %c1_155 : tensor<1x?x32x50xcomplex<f32>>
%356 = flow.tensor.bitcast %355 : tensor<1x?x32x50xcomplex<f32>>{%dim_156} -> tensor<1x?x32x100xf32>{%dim_156}
%357 = torch_c.from_builtin_tensor %356 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32>
torch.bind_symbolic_shape %357, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32>
%int5_157 = torch.constant.int 5
%358 = torch.prims.convert_element_type %357, %int5_157 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %358, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
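// NOTE: paged KV-cache update. %237 is the flat cache [pages, 2662400], where
// 2662400 = 26 layers x 2 (K/V) x 16 tokens/page x 32 heads x 100 head-dim. It is viewed
// as [pages, 26, 2, 16, 32, 100] and flattened to [pages*52, 16, 32, 100] so each sub-page
// can be addressed by a single linear index.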
%int0_158 = torch.constant.int 0
%359 = torch.aten.size.int %237, %int0_158 : !torch.vtensor<[?,2662400],f16>, !torch.int -> !torch.int
%int26 = torch.constant.int 26
%int2_159 = torch.constant.int 2
%int16 = torch.constant.int 16
%int32_160 = torch.constant.int 32
%int100_161 = torch.constant.int 100
%360 = torch.prim.ListConstruct %359, %int26, %int2_159, %int16, %int32_160, %int100_161 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%361 = torch.aten.view %237, %360 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16>
torch.bind_symbolic_shape %361, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16>
%int26_162 = torch.constant.int 26
%362 = torch.aten.mul.int %359, %int26_162 : !torch.int, !torch.int -> !torch.int
%int2_163 = torch.constant.int 2
%363 = torch.aten.mul.int %362, %int2_163 : !torch.int, !torch.int -> !torch.int
%int16_164 = torch.constant.int 16
%int32_165 = torch.constant.int 32
%int100_166 = torch.constant.int 100
%364 = torch.prim.ListConstruct %363, %int16_164, %int32_165, %int100_166 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%365 = torch.aten.view %361, %364 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %365, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
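// NOTE: sub-page index arithmetic: %arg2 holds page ids, so id * 52 selects a page row,
// and the offsets +0 / +1 appear to address this layer's K and V sub-pages respectively.
// The rotated keys (%358) and the values (%300) are reshaped to match the sub-page layout.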
%int52 = torch.constant.int 52
%366 = torch.aten.mul.Scalar %arg2, %int52 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %366, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
%int0_167 = torch.constant.int 0
%int1_168 = torch.constant.int 1
%367 = torch.aten.add.Scalar %366, %int0_167, %int1_168 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %367, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
%int1_169 = torch.constant.int 1
%368 = torch.aten.size.int %arg2, %int1_169 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.int
%int1_170 = torch.constant.int 1
%int16_171 = torch.constant.int 16
%int32_172 = torch.constant.int 32
%int100_173 = torch.constant.int 100
%369 = torch.prim.ListConstruct %int1_170, %368, %int16_171, %int32_172, %int100_173 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%370 = torch.aten.view %358, %369 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16>
torch.bind_symbolic_shape %370, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16>
%int16_174 = torch.constant.int 16
%int32_175 = torch.constant.int 32
%int100_176 = torch.constant.int 100
%371 = torch.prim.ListConstruct %368, %int16_174, %int32_175, %int100_176 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%372 = torch.aten.view %370, %371 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %372, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%373 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int>
%374 = torch.aten.view %367, %373 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %374, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64>
%int1_177 = torch.constant.int 1
%int16_178 = torch.constant.int 16
%int32_179 = torch.constant.int 32
%int100_180 = torch.constant.int 100
%375 = torch.prim.ListConstruct %int1_177, %368, %int16_178, %int32_179, %int100_180 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%376 = torch.aten.view %300, %375 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16>
torch.bind_symbolic_shape %376, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16>
%int16_181 = torch.constant.int 16
%int32_182 = torch.constant.int 32
%int100_183 = torch.constant.int 100
%377 = torch.prim.ListConstruct %368, %int16_181, %int32_182, %int100_183 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%378 = torch.aten.view %376, %377 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %378, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%int1_184 = torch.constant.int 1
%int1_185 = torch.constant.int 1
%379 = torch.aten.add.Scalar %367, %int1_184, %int1_185 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %379, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
%380 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int>
%381 = torch.aten.view %379, %380 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %381, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64>
%382 = torch.prim.ListConstruct %374, %381 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor>
%int0_186 = torch.constant.int 0
%383 = torch.aten.cat %382, %int0_186 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %383, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64>
%384 = torch.prim.ListConstruct %372, %378 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor>
%int0_187 = torch.constant.int 0
%385 = torch.aten.cat %384, %int0_187 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %385, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
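// NOTE: the concatenated K/V slices are scattered into the flattened cache with index_put,
// and the result is viewed back to the flat [pages, 2662400] layout (%391).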
%386 = torch.prim.ListConstruct %383 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>>
%false_188 = torch.constant.bool false
%387 = torch.aten.index_put %365, %386, %385, %false_188 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %387, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%int26_189 = torch.constant.int 26
%int2_190 = torch.constant.int 2
%int16_191 = torch.constant.int 16
%int32_192 = torch.constant.int 32
%int100_193 = torch.constant.int 100
%388 = torch.prim.ListConstruct %359, %int26_189, %int2_190, %int16_191, %int32_192, %int100_193 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%389 = torch.aten.view %387, %388 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16>
torch.bind_symbolic_shape %389, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16>
%int2662400 = torch.constant.int 2662400
%390 = torch.prim.ListConstruct %359, %int2662400 : (!torch.int, !torch.int) -> !torch.list<int>
%391 = torch.aten.view %389, %390 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16>
torch.bind_symbolic_shape %391, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16>
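// NOTE: attention for this block: Q (%329), K (%358) and V (%300) move to
// [1, 32, seq, 100]; scores are Q @ K^T scaled by 1/10 (presumably 1/sqrt(head_dim=100)),
// offset by the additive mask %266, and softmaxed in f32 before truncating back to f16.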
%int1_194 = torch.constant.int 1
%int2_195 = torch.constant.int 2
%392 = torch.aten.transpose.int %329, %int1_194, %int2_195 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %392, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_196 = torch.constant.int 1
%int2_197 = torch.constant.int 2
%393 = torch.aten.transpose.int %358, %int1_196, %int2_197 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %393, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_198 = torch.constant.int 1
%int2_199 = torch.constant.int 2
%394 = torch.aten.transpose.int %300, %int1_198, %int2_199 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %394, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int2_200 = torch.constant.int 2
%int3_201 = torch.constant.int 3
%395 = torch.aten.transpose.int %393, %int2_200, %int3_201 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16>
torch.bind_symbolic_shape %395, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16>
%int1_202 = torch.constant.int 1
%int32_203 = torch.constant.int 32
%int100_204 = torch.constant.int 100
%396 = torch.prim.ListConstruct %int1_202, %int32_203, %315, %int100_204 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_205 = torch.constant.bool false
%397 = torch.aten.expand %392, %396, %false_205 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %397, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int32_206 = torch.constant.int 32
%int100_207 = torch.constant.int 100
%398 = torch.prim.ListConstruct %int32_206, %315, %int100_207 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%399 = torch.aten.view %397, %398 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %399, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%int1_208 = torch.constant.int 1
%int32_209 = torch.constant.int 32
%int100_210 = torch.constant.int 100
%400 = torch.prim.ListConstruct %int1_208, %int32_209, %int100_210, %344 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_211 = torch.constant.bool false
%401 = torch.aten.expand %395, %400, %false_211 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16>
torch.bind_symbolic_shape %401, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16>
%int32_212 = torch.constant.int 32
%int100_213 = torch.constant.int 100
%402 = torch.prim.ListConstruct %int32_212, %int100_213, %344 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%403 = torch.aten.view %401, %402 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16>
torch.bind_symbolic_shape %403, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16>
%404 = torch.aten.bmm %399, %403 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16>
torch.bind_symbolic_shape %404, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16>
%int1_214 = torch.constant.int 1
%int32_215 = torch.constant.int 32
%405 = torch.prim.ListConstruct %int1_214, %int32_215, %315, %344 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%406 = torch.aten.view %404, %405 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %406, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%float1.000000e01 = torch.constant.float 1.000000e+01
%407 = torch.aten.div.Scalar %406, %float1.000000e01 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %407, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int1_216 = torch.constant.int 1
%408 = torch.aten.add.Tensor %407, %266, %int1_216 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %408, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int6_217 = torch.constant.int 6
%409 = torch.prims.convert_element_type %408, %int6_217 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %409, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int-1_218 = torch.constant.int -1
%false_219 = torch.constant.bool false
%410 = torch.aten._softmax %409, %int-1_218, %false_219 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %410, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int5_220 = torch.constant.int 5
%411 = torch.prims.convert_element_type %410, %int5_220 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %411, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int1_221 = torch.constant.int 1
%int32_222 = torch.constant.int 32
%412 = torch.prim.ListConstruct %int1_221, %int32_222, %315, %344 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_223 = torch.constant.bool false
%413 = torch.aten.expand %411, %412, %false_223 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %413, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int32_224 = torch.constant.int 32
%414 = torch.prim.ListConstruct %int32_224, %315, %344 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%415 = torch.aten.view %413, %414 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16>
torch.bind_symbolic_shape %415, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16>
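// NOTE: the attention probabilities are multiplied against V, then the result is moved
// back to [1, seq, 32, 100] and flattened to [1, seq, 3200].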
%int1_225 = torch.constant.int 1
%416 = torch.aten.size.int %294, %int1_225 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int1_226 = torch.constant.int 1
%int32_227 = torch.constant.int 32
%int100_228 = torch.constant.int 100
%417 = torch.prim.ListConstruct %int1_226, %int32_227, %416, %int100_228 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_229 = torch.constant.bool false
%418 = torch.aten.expand %394, %417, %false_229 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %418, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int32_230 = torch.constant.int 32
%int100_231 = torch.constant.int 100
%419 = torch.prim.ListConstruct %int32_230, %416, %int100_231 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%420 = torch.aten.view %418, %419 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %420, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%421 = torch.aten.bmm %415, %420 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %421, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%int1_232 = torch.constant.int 1
%int32_233 = torch.constant.int 32
%int100_234 = torch.constant.int 100
%422 = torch.prim.ListConstruct %int1_232, %int32_233, %315, %int100_234 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%423 = torch.aten.view %421, %422 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %423, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_235 = torch.constant.int 1
%int2_236 = torch.constant.int 2
%424 = torch.aten.transpose.int %423, %int1_235, %int2_236 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %424, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int0_237 = torch.constant.int 0
%425 = torch.aten.clone %424, %int0_237 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %425, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_238 = torch.constant.int 1
%int3200_239 = torch.constant.int 3200
%426 = torch.prim.ListConstruct %int1_238, %315, %int3200_239 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%427 = torch.aten._unsafe_view %425, %426 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %427, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
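// NOTE: projection through what appears to be the attn_output weight (%5), followed by
// the residual add with the block input (%267).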
%int-2_240 = torch.constant.int -2
%int-1_241 = torch.constant.int -1
%428 = torch.aten.transpose.int %5, %int-2_240, %int-1_241 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_242 = torch.constant.int 3200
%429 = torch.prim.ListConstruct %315, %int3200_242 : (!torch.int, !torch.int) -> !torch.list<int>
%430 = torch.aten.view %427, %429 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %430, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%431 = torch.aten.mm %430, %428 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %431, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_243 = torch.constant.int 1
%int3200_244 = torch.constant.int 3200
%432 = torch.prim.ListConstruct %int1_243, %315, %int3200_244 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%433 = torch.aten.view %431, %432 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %433, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_245 = torch.constant.int 1
%434 = torch.aten.add.Tensor %267, %433, %int1_245 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %434, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
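// NOTE: RMSNorm (likely ffn_norm, f32 weight %6): y = x * rsqrt(mean(x^2, dim=-1) + ~1e-6)
// * w, computed in f32 and truncated back to f16.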
%int6_246 = torch.constant.int 6
%435 = torch.prims.convert_element_type %434, %int6_246 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %435, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int2_247 = torch.constant.int 2
%436 = torch.aten.pow.Tensor_Scalar %435, %int2_247 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %436, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int-1_248 = torch.constant.int -1
%437 = torch.prim.ListConstruct %int-1_248 : (!torch.int) -> !torch.list<int>
%true_249 = torch.constant.bool true
%none_250 = torch.constant.none
%438 = torch.aten.mean.dim %436, %437, %true_249, %none_250 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %438, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%float9.999990e-07_251 = torch.constant.float 9.9999999747524271E-7
%int1_252 = torch.constant.int 1
%439 = torch.aten.add.Scalar %438, %float9.999990e-07_251, %int1_252 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %439, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%440 = torch.aten.rsqrt %439 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %440, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%441 = torch.aten.mul.Tensor %435, %440 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %441, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%442 = torch.aten.mul.Tensor %6, %441 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %442, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int5_253 = torch.constant.int 5
%443 = torch.prims.convert_element_type %442, %int5_253 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %443, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
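// NOTE: SwiGLU feed-forward: silu(x @ gate^T) * (x @ up^T) projected back through the down
// weight (%7, %8, %9, apparently ffn_gate/ffn_up/ffn_down), then the second residual add
// closes the block.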
%int-2_254 = torch.constant.int -2
%int-1_255 = torch.constant.int -1
%444 = torch.aten.transpose.int %7, %int-2_254, %int-1_255 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16>
%int3200_256 = torch.constant.int 3200
%445 = torch.prim.ListConstruct %240, %int3200_256 : (!torch.int, !torch.int) -> !torch.list<int>
%446 = torch.aten.view %443, %445 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %446, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%447 = torch.aten.mm %446, %444 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %447, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%int1_257 = torch.constant.int 1
%int8640 = torch.constant.int 8640
%448 = torch.prim.ListConstruct %int1_257, %240, %int8640 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%449 = torch.aten.view %447, %448 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %449, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%450 = torch.aten.silu %449 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %450, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%int-2_258 = torch.constant.int -2
%int-1_259 = torch.constant.int -1
%451 = torch.aten.transpose.int %8, %int-2_258, %int-1_259 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16>
%int3200_260 = torch.constant.int 3200
%452 = torch.prim.ListConstruct %240, %int3200_260 : (!torch.int, !torch.int) -> !torch.list<int>
%453 = torch.aten.view %443, %452 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %453, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%454 = torch.aten.mm %453, %451 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %454, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%int1_261 = torch.constant.int 1
%int8640_262 = torch.constant.int 8640
%455 = torch.prim.ListConstruct %int1_261, %240, %int8640_262 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%456 = torch.aten.view %454, %455 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %456, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%457 = torch.aten.mul.Tensor %450, %456 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %457, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%int-2_263 = torch.constant.int -2
%int-1_264 = torch.constant.int -1
%458 = torch.aten.transpose.int %9, %int-2_263, %int-1_264 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16>
%int1_265 = torch.constant.int 1
%459 = torch.aten.size.int %449, %int1_265 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int
%int8640_266 = torch.constant.int 8640
%460 = torch.prim.ListConstruct %459, %int8640_266 : (!torch.int, !torch.int) -> !torch.list<int>
%461 = torch.aten.view %457, %460 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %461, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%462 = torch.aten.mm %461, %458 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %462, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_267 = torch.constant.int 1
%int3200_268 = torch.constant.int 3200
%463 = torch.prim.ListConstruct %int1_267, %459, %int3200_268 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%464 = torch.aten.view %462, %463 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %464, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_269 = torch.constant.int 1
%465 = torch.aten.add.Tensor %434, %464, %int1_269 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %465, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
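// NOTE: the next block starts here with its attn_norm RMSNorm (f32 weight %10) over the
// previous block's output.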
%int6_270 = torch.constant.int 6
%466 = torch.prims.convert_element_type %465, %int6_270 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %466, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int2_271 = torch.constant.int 2
%467 = torch.aten.pow.Tensor_Scalar %466, %int2_271 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %467, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int-1_272 = torch.constant.int -1
%468 = torch.prim.ListConstruct %int-1_272 : (!torch.int) -> !torch.list<int>
%true_273 = torch.constant.bool true
%none_274 = torch.constant.none
%469 = torch.aten.mean.dim %467, %468, %true_273, %none_274 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %469, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%float9.999990e-07_275 = torch.constant.float 9.9999999747524271E-7
%int1_276 = torch.constant.int 1
%470 = torch.aten.add.Scalar %469, %float9.999990e-07_275, %int1_276 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %470, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%471 = torch.aten.rsqrt %470 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %471, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%472 = torch.aten.mul.Tensor %466, %471 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %472, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%473 = torch.aten.mul.Tensor %10, %472 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %473, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int5_277 = torch.constant.int 5
%474 = torch.prims.convert_element_type %473, %int5_277 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %474, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
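// NOTE: Q/K/V projections for this block (weights %11, %12, %13), each reshaped to
// [1, seq, 32, 100].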
%int-2_278 = torch.constant.int -2
%int-1_279 = torch.constant.int -1
%475 = torch.aten.transpose.int %11, %int-2_278, %int-1_279 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_280 = torch.constant.int 3200
%476 = torch.prim.ListConstruct %240, %int3200_280 : (!torch.int, !torch.int) -> !torch.list<int>
%477 = torch.aten.view %474, %476 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %477, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%478 = torch.aten.mm %477, %475 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %478, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_281 = torch.constant.int 1
%int3200_282 = torch.constant.int 3200
%479 = torch.prim.ListConstruct %int1_281, %240, %int3200_282 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%480 = torch.aten.view %478, %479 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %480, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_283 = torch.constant.int -2
%int-1_284 = torch.constant.int -1
%481 = torch.aten.transpose.int %12, %int-2_283, %int-1_284 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_285 = torch.constant.int 3200
%482 = torch.prim.ListConstruct %240, %int3200_285 : (!torch.int, !torch.int) -> !torch.list<int>
%483 = torch.aten.view %474, %482 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %483, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%484 = torch.aten.mm %483, %481 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %484, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_286 = torch.constant.int 1
%int3200_287 = torch.constant.int 3200
%485 = torch.prim.ListConstruct %int1_286, %240, %int3200_287 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%486 = torch.aten.view %484, %485 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %486, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_288 = torch.constant.int -2
%int-1_289 = torch.constant.int -1
%487 = torch.aten.transpose.int %13, %int-2_288, %int-1_289 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_290 = torch.constant.int 3200
%488 = torch.prim.ListConstruct %240, %int3200_290 : (!torch.int, !torch.int) -> !torch.list<int>
%489 = torch.aten.view %474, %488 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %489, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%490 = torch.aten.mm %489, %487 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %490, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_291 = torch.constant.int 1
%int3200_292 = torch.constant.int 3200
%491 = torch.prim.ListConstruct %int1_291, %240, %int3200_292 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%492 = torch.aten.view %490, %491 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %492, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_293 = torch.constant.int 1
%int32_294 = torch.constant.int 32
%int100_295 = torch.constant.int 100
%493 = torch.prim.ListConstruct %int1_293, %240, %int32_294, %int100_295 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%494 = torch.aten.view %480, %493 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %494, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_296 = torch.constant.int 1
%int32_297 = torch.constant.int 32
%int100_298 = torch.constant.int 100
%495 = torch.prim.ListConstruct %int1_296, %240, %int32_297, %int100_298 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%496 = torch.aten.view %486, %495 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %496, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_299 = torch.constant.int 1
%int32_300 = torch.constant.int 32
%int100_301 = torch.constant.int 100
%497 = torch.prim.ListConstruct %int1_299, %240, %int32_300, %int100_301 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%498 = torch.aten.view %492, %497 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %498, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
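// NOTE: the RoPE table is re-emitted verbatim for each use rather than hoisted; later
// pipeline stages presumably CSE these duplicates. This copy feeds the query rotation.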
%int2048_302 = torch.constant.int 2048
%none_303 = torch.constant.none
%none_304 = torch.constant.none
%cpu_305 = torch.constant.device "cpu"
%false_306 = torch.constant.bool false
%499 = torch.aten.arange %int2048_302, %none_303, %none_304, %cpu_305, %false_306 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64>
%int0_307 = torch.constant.int 0
%int100_308 = torch.constant.int 100
%int2_309 = torch.constant.int 2
%none_310 = torch.constant.none
%none_311 = torch.constant.none
%cpu_312 = torch.constant.device "cpu"
%false_313 = torch.constant.bool false
%500 = torch.aten.arange.start_step %int0_307, %int100_308, %int2_309, %none_310, %none_311, %cpu_312, %false_313 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64>
%int0_314 = torch.constant.int 0
%int0_315 = torch.constant.int 0
%int50_316 = torch.constant.int 50
%int1_317 = torch.constant.int 1
%501 = torch.aten.slice.Tensor %500, %int0_314, %int0_315, %int50_316, %int1_317 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64>
%int6_318 = torch.constant.int 6
%502 = torch.prims.convert_element_type %501, %int6_318 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32>
%int100_319 = torch.constant.int 100
%503 = torch.aten.div.Scalar %502, %int100_319 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32>
%float1.000000e04_320 = torch.constant.float 1.000000e+04
%504 = torch.aten.pow.Scalar %float1.000000e04_320, %503 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%505 = torch.aten.reciprocal %504 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%float1.000000e00_321 = torch.constant.float 1.000000e+00
%506 = torch.aten.mul.Scalar %505, %float1.000000e00_321 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32>
%int2048_322 = torch.constant.int 2048
%int1_323 = torch.constant.int 1
%507 = torch.prim.ListConstruct %int2048_322, %int1_323 : (!torch.int, !torch.int) -> !torch.list<int>
%508 = torch.aten.view %499, %507 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64>
%509 = torch.aten.mul.Tensor %508, %506 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32>
%510 = torch.aten.cos %509 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%511 = torch.aten.sin %509 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%512 = torch.aten.complex %510, %511 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>>
%int1_324 = torch.constant.int 1
%513 = torch.aten.size.int %480, %int1_324 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int0_325 = torch.constant.int 0
%514 = torch.aten.add.int %int0_325, %513 : !torch.int, !torch.int -> !torch.int
%int0_326 = torch.constant.int 0
%int0_327 = torch.constant.int 0
%int1_328 = torch.constant.int 1
%515 = torch.aten.slice.Tensor %512, %int0_326, %int0_327, %514, %int1_328 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %515, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int1_329 = torch.constant.int 1
%int0_330 = torch.constant.int 0
%int9223372036854775807_331 = torch.constant.int 9223372036854775807
%int1_332 = torch.constant.int 1
%516 = torch.aten.slice.Tensor %515, %int1_329, %int0_330, %int9223372036854775807_331, %int1_332 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %516, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int0_333 = torch.constant.int 0
%517 = torch.aten.unsqueeze %516, %int0_333 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>>
torch.bind_symbolic_shape %517, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>>
%int2_334 = torch.constant.int 2
%518 = torch.aten.unsqueeze %517, %int2_334 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %518, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%int3_335 = torch.constant.int 3
%int0_336 = torch.constant.int 0
%int9223372036854775807_337 = torch.constant.int 9223372036854775807
%int1_338 = torch.constant.int 1
%519 = torch.aten.slice.Tensor %518, %int3_335, %int0_336, %int9223372036854775807_337, %int1_338 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %519, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
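// NOTE: rotary embedding applied to this block's queries (%494 -> %527) via the same
// complex bitcast pattern as above.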
%520 = torch_c.to_builtin_tensor %494 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16>
%c1_339 = arith.constant 1 : index
%dim_340 = tensor.dim %520, %c1_339 : tensor<1x?x32x100xf16>
%521 = flow.tensor.bitcast %520 : tensor<1x?x32x100xf16>{%dim_340} -> tensor<1x?x32x50xcomplex<f16>>{%dim_340}
%522 = torch_c.from_builtin_tensor %521 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>>
torch.bind_symbolic_shape %522, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>>
%523 = torch.aten.mul.Tensor %522, %519 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>>
torch.bind_symbolic_shape %523, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>>
%524 = torch_c.to_builtin_tensor %523 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>>
%c1_341 = arith.constant 1 : index
%dim_342 = tensor.dim %524, %c1_341 : tensor<1x?x32x50xcomplex<f32>>
%525 = flow.tensor.bitcast %524 : tensor<1x?x32x50xcomplex<f32>>{%dim_342} -> tensor<1x?x32x100xf32>{%dim_342}
%526 = torch_c.from_builtin_tensor %525 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32>
torch.bind_symbolic_shape %526, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32>
%int5_343 = torch.constant.int 5
%527 = torch.prims.convert_element_type %526, %int5_343 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %527, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
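// NOTE: a second identical RoPE table, this copy feeding the key rotation.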
%int2048_344 = torch.constant.int 2048
%none_345 = torch.constant.none
%none_346 = torch.constant.none
%cpu_347 = torch.constant.device "cpu"
%false_348 = torch.constant.bool false
%528 = torch.aten.arange %int2048_344, %none_345, %none_346, %cpu_347, %false_348 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64>
%int0_349 = torch.constant.int 0
%int100_350 = torch.constant.int 100
%int2_351 = torch.constant.int 2
%none_352 = torch.constant.none
%none_353 = torch.constant.none
%cpu_354 = torch.constant.device "cpu"
%false_355 = torch.constant.bool false
%529 = torch.aten.arange.start_step %int0_349, %int100_350, %int2_351, %none_352, %none_353, %cpu_354, %false_355 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64>
%int0_356 = torch.constant.int 0
%int0_357 = torch.constant.int 0
%int50_358 = torch.constant.int 50
%int1_359 = torch.constant.int 1
%530 = torch.aten.slice.Tensor %529, %int0_356, %int0_357, %int50_358, %int1_359 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64>
%int6_360 = torch.constant.int 6
%531 = torch.prims.convert_element_type %530, %int6_360 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32>
%int100_361 = torch.constant.int 100
%532 = torch.aten.div.Scalar %531, %int100_361 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32>
%float1.000000e04_362 = torch.constant.float 1.000000e+04
%533 = torch.aten.pow.Scalar %float1.000000e04_362, %532 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%534 = torch.aten.reciprocal %533 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%float1.000000e00_363 = torch.constant.float 1.000000e+00
%535 = torch.aten.mul.Scalar %534, %float1.000000e00_363 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32>
%int2048_364 = torch.constant.int 2048
%int1_365 = torch.constant.int 1
%536 = torch.prim.ListConstruct %int2048_364, %int1_365 : (!torch.int, !torch.int) -> !torch.list<int>
%537 = torch.aten.view %528, %536 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64>
%538 = torch.aten.mul.Tensor %537, %535 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32>
%539 = torch.aten.cos %538 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%540 = torch.aten.sin %538 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%541 = torch.aten.complex %539, %540 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>>
%int1_366 = torch.constant.int 1
%542 = torch.aten.size.int %486, %int1_366 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int0_367 = torch.constant.int 0
%543 = torch.aten.add.int %int0_367, %542 : !torch.int, !torch.int -> !torch.int
%int0_368 = torch.constant.int 0
%int0_369 = torch.constant.int 0
%int1_370 = torch.constant.int 1
%544 = torch.aten.slice.Tensor %541, %int0_368, %int0_369, %543, %int1_370 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %544, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int1_371 = torch.constant.int 1
%int0_372 = torch.constant.int 0
%int9223372036854775807_373 = torch.constant.int 9223372036854775807
%int1_374 = torch.constant.int 1
%545 = torch.aten.slice.Tensor %544, %int1_371, %int0_372, %int9223372036854775807_373, %int1_374 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %545, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int0_375 = torch.constant.int 0
%546 = torch.aten.unsqueeze %545, %int0_375 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>>
torch.bind_symbolic_shape %546, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>>
%int2_376 = torch.constant.int 2
%547 = torch.aten.unsqueeze %546, %int2_376 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %547, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%int3_377 = torch.constant.int 3
%int0_378 = torch.constant.int 0
%int9223372036854775807_379 = torch.constant.int 9223372036854775807
%int1_380 = torch.constant.int 1
%548 = torch.aten.slice.Tensor %547, %int3_377, %int0_378, %int9223372036854775807_379, %int1_380 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %548, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
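// NOTE: rotary embedding applied to this block's keys (%496 -> %556).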
%549 = torch_c.to_builtin_tensor %496 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16>
%c1_381 = arith.constant 1 : index
%dim_382 = tensor.dim %549, %c1_381 : tensor<1x?x32x100xf16>
%550 = flow.tensor.bitcast %549 : tensor<1x?x32x100xf16>{%dim_382} -> tensor<1x?x32x50xcomplex<f16>>{%dim_382}
%551 = torch_c.from_builtin_tensor %550 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>>
torch.bind_symbolic_shape %551, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>>
%552 = torch.aten.mul.Tensor %551, %548 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>>
torch.bind_symbolic_shape %552, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>>
%553 = torch_c.to_builtin_tensor %552 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>>
%c1_383 = arith.constant 1 : index
%dim_384 = tensor.dim %553, %c1_383 : tensor<1x?x32x50xcomplex<f32>>
%554 = flow.tensor.bitcast %553 : tensor<1x?x32x50xcomplex<f32>>{%dim_384} -> tensor<1x?x32x100xf32>{%dim_384}
%555 = torch_c.from_builtin_tensor %554 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32>
torch.bind_symbolic_shape %555, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32>
%int5_385 = torch.constant.int 5
%556 = torch.prims.convert_element_type %555, %int5_385 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %556, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
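// NOTE: cache write for this block: the same indexing pattern as before, but with sub-page
// offsets +2 (K) and +3 (V) within each 52-row page.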
%int52_386 = torch.constant.int 52 | |
%557 = torch.aten.mul.Scalar %arg2, %int52_386 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %557, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int2_387 = torch.constant.int 2 | |
%int1_388 = torch.constant.int 1 | |
%558 = torch.aten.add.Scalar %557, %int2_387, %int1_388 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %558, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_389 = torch.constant.int 1 | |
%int16_390 = torch.constant.int 16 | |
%int32_391 = torch.constant.int 32 | |
%int100_392 = torch.constant.int 100 | |
%559 = torch.prim.ListConstruct %int1_389, %368, %int16_390, %int32_391, %int100_392 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%560 = torch.aten.view %556, %559 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %560, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_393 = torch.constant.int 16 | |
%int32_394 = torch.constant.int 32 | |
%int100_395 = torch.constant.int 100 | |
%561 = torch.prim.ListConstruct %368, %int16_393, %int32_394, %int100_395 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%562 = torch.aten.view %560, %561 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %562, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%563 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%564 = torch.aten.view %558, %563 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %564, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_396 = torch.constant.int 1 | |
%int16_397 = torch.constant.int 16 | |
%int32_398 = torch.constant.int 32 | |
%int100_399 = torch.constant.int 100 | |
%565 = torch.prim.ListConstruct %int1_396, %368, %int16_397, %int32_398, %int100_399 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%566 = torch.aten.view %498, %565 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %566, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_400 = torch.constant.int 16 | |
%int32_401 = torch.constant.int 32 | |
%int100_402 = torch.constant.int 100 | |
%567 = torch.prim.ListConstruct %368, %int16_400, %int32_401, %int100_402 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%568 = torch.aten.view %566, %567 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %568, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_403 = torch.constant.int 1 | |
%int1_404 = torch.constant.int 1 | |
%569 = torch.aten.add.Scalar %558, %int1_403, %int1_404 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %569, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%570 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%571 = torch.aten.view %569, %570 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %571, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%572 = torch.prim.ListConstruct %564, %571 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_405 = torch.constant.int 0 | |
%573 = torch.aten.cat %572, %int0_405 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %573, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%574 = torch.prim.ListConstruct %562, %568 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_406 = torch.constant.int 0 | |
%575 = torch.aten.cat %574, %int0_406 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %575, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
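// %573 concatenates the K and V slot indices and %575 the corresponding K/V page tensors, so a single index_put below can scatter both into the cache at once. The cache %391 is viewed as [pages, 26 layers, 2 (K/V), 16 tokens, 32 heads, 100 dims] and flattened to [pages * 52, 16, 32, 100] for the write. | |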
%int26_407 = torch.constant.int 26 | |
%int2_408 = torch.constant.int 2 | |
%int16_409 = torch.constant.int 16 | |
%int32_410 = torch.constant.int 32 | |
%int100_411 = torch.constant.int 100 | |
%576 = torch.prim.ListConstruct %359, %int26_407, %int2_408, %int16_409, %int32_410, %int100_411 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%577 = torch.aten.view %391, %576 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %577, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_412 = torch.constant.int 26 | |
%578 = torch.aten.mul.int %359, %int26_412 : !torch.int, !torch.int -> !torch.int | |
%int2_413 = torch.constant.int 2 | |
%579 = torch.aten.mul.int %578, %int2_413 : !torch.int, !torch.int -> !torch.int | |
%int16_414 = torch.constant.int 16 | |
%int32_415 = torch.constant.int 32 | |
%int100_416 = torch.constant.int 100 | |
%580 = torch.prim.ListConstruct %579, %int16_414, %int32_415, %int100_416 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%581 = torch.aten.view %577, %580 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %581, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%582 = torch.prim.ListConstruct %573 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_417 = torch.constant.bool false | |
%583 = torch.aten.index_put %581, %582, %575, %false_417 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %583, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_418 = torch.constant.int 26 | |
%int2_419 = torch.constant.int 2 | |
%int16_420 = torch.constant.int 16 | |
%int32_421 = torch.constant.int 32 | |
%int100_422 = torch.constant.int 100 | |
%584 = torch.prim.ListConstruct %359, %int26_418, %int2_419, %int16_420, %int32_421, %int100_422 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%585 = torch.aten.view %583, %584 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %585, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_423 = torch.constant.int 2662400 | |
%586 = torch.prim.ListConstruct %359, %int2662400_423 : (!torch.int, !torch.int) -> !torch.list<int> | |
%587 = torch.aten.view %585, %586 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %587, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
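// Attention proper: Q (%527), K (%556) and V (%498) are transposed to [1, 32 heads, seq, 100], and K is transposed once more to [1, 32, 100, seq] so Q @ K^T can run as a batched matmul over the 32 heads. | |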
%int1_424 = torch.constant.int 1 | |
%int2_425 = torch.constant.int 2 | |
%588 = torch.aten.transpose.int %527, %int1_424, %int2_425 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %588, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_426 = torch.constant.int 1 | |
%int2_427 = torch.constant.int 2 | |
%589 = torch.aten.transpose.int %556, %int1_426, %int2_427 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %589, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_428 = torch.constant.int 1 | |
%int2_429 = torch.constant.int 2 | |
%590 = torch.aten.transpose.int %498, %int1_428, %int2_429 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %590, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_430 = torch.constant.int 2 | |
%int3_431 = torch.constant.int 3 | |
%591 = torch.aten.transpose.int %589, %int2_430, %int3_431 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %591, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_432 = torch.constant.int 1 | |
%int32_433 = torch.constant.int 32 | |
%int100_434 = torch.constant.int 100 | |
%592 = torch.prim.ListConstruct %int1_432, %int32_433, %513, %int100_434 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_435 = torch.constant.bool false | |
%593 = torch.aten.expand %588, %592, %false_435 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %593, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_436 = torch.constant.int 32 | |
%int100_437 = torch.constant.int 100 | |
%594 = torch.prim.ListConstruct %int32_436, %513, %int100_437 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%595 = torch.aten.view %593, %594 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %595, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_438 = torch.constant.int 1 | |
%int32_439 = torch.constant.int 32 | |
%int100_440 = torch.constant.int 100 | |
%596 = torch.prim.ListConstruct %int1_438, %int32_439, %int100_440, %542 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_441 = torch.constant.bool false | |
%597 = torch.aten.expand %591, %596, %false_441 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %597, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_442 = torch.constant.int 32 | |
%int100_443 = torch.constant.int 100 | |
%598 = torch.prim.ListConstruct %int32_442, %int100_443, %542 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%599 = torch.aten.view %597, %598 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %599, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%600 = torch.aten.bmm %595, %599 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %600, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_444 = torch.constant.int 1 | |
%int32_445 = torch.constant.int 32 | |
%601 = torch.prim.ListConstruct %int1_444, %int32_445, %513, %542 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%602 = torch.aten.view %600, %601 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %602, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
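// Scores are divided by 10 (the square root of the head dim 100), the additive attention mask %266 is applied, and softmax runs in f32 before casting back to f16, presumably for numerical stability. | |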
%float1.000000e01_446 = torch.constant.float 1.000000e+01 | |
%603 = torch.aten.div.Scalar %602, %float1.000000e01_446 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %603, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_447 = torch.constant.int 1 | |
%604 = torch.aten.add.Tensor %603, %266, %int1_447 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %604, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_448 = torch.constant.int 6 | |
%605 = torch.prims.convert_element_type %604, %int6_448 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %605, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_449 = torch.constant.int -1 | |
%false_450 = torch.constant.bool false | |
%606 = torch.aten._softmax %605, %int-1_449, %false_450 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %606, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_451 = torch.constant.int 5 | |
%607 = torch.prims.convert_element_type %606, %int5_451 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %607, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_452 = torch.constant.int 1 | |
%int32_453 = torch.constant.int 32 | |
%608 = torch.prim.ListConstruct %int1_452, %int32_453, %513, %542 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_454 = torch.constant.bool false | |
%609 = torch.aten.expand %607, %608, %false_454 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %609, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_455 = torch.constant.int 32 | |
%610 = torch.prim.ListConstruct %int32_455, %513, %542 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%611 = torch.aten.view %609, %610 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %611, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_456 = torch.constant.int 1 | |
%612 = torch.aten.size.int %492, %int1_456 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_457 = torch.constant.int 1 | |
%int32_458 = torch.constant.int 32 | |
%int100_459 = torch.constant.int 100 | |
%613 = torch.prim.ListConstruct %int1_457, %int32_458, %612, %int100_459 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_460 = torch.constant.bool false | |
%614 = torch.aten.expand %590, %613, %false_460 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %614, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_461 = torch.constant.int 32 | |
%int100_462 = torch.constant.int 100 | |
%615 = torch.prim.ListConstruct %int32_461, %612, %int100_462 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%616 = torch.aten.view %614, %615 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %616, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%617 = torch.aten.bmm %611, %616 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %617, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_463 = torch.constant.int 1 | |
%int32_464 = torch.constant.int 32 | |
%int100_465 = torch.constant.int 100 | |
%618 = torch.prim.ListConstruct %int1_463, %int32_464, %513, %int100_465 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%619 = torch.aten.view %617, %618 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %619, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_466 = torch.constant.int 1 | |
%int2_467 = torch.constant.int 2 | |
%620 = torch.aten.transpose.int %619, %int1_466, %int2_467 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %620, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_468 = torch.constant.int 0 | |
%621 = torch.aten.clone %620, %int0_468 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %621, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_469 = torch.constant.int 1 | |
%int3200_470 = torch.constant.int 3200 | |
%622 = torch.prim.ListConstruct %int1_469, %513, %int3200_470 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%623 = torch.aten._unsafe_view %621, %622 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %623, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
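// Heads are merged back to [1, seq, 3200]; the attention output projection (weight %14, pre-transposed) follows, and %630 adds the residual from %465. | |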
%int-2_471 = torch.constant.int -2 | |
%int-1_472 = torch.constant.int -1 | |
%624 = torch.aten.transpose.int %14, %int-2_471, %int-1_472 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_473 = torch.constant.int 3200 | |
%625 = torch.prim.ListConstruct %513, %int3200_473 : (!torch.int, !torch.int) -> !torch.list<int> | |
%626 = torch.aten.view %623, %625 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %626, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%627 = torch.aten.mm %626, %624 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %627, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_474 = torch.constant.int 1 | |
%int3200_475 = torch.constant.int 3200 | |
%628 = torch.prim.ListConstruct %int1_474, %513, %int3200_475 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%629 = torch.aten.view %627, %628 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %629, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_476 = torch.constant.int 1 | |
%630 = torch.aten.add.Tensor %465, %629, %int1_476 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %630, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
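// RMSNorm (ffn_norm, weight %15), computed in f32: y = w * x * rsqrt(mean(x^2, dim=-1) + ~1e-6), then cast back to f16. | |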
%int6_477 = torch.constant.int 6 | |
%631 = torch.prims.convert_element_type %630, %int6_477 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %631, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_478 = torch.constant.int 2 | |
%632 = torch.aten.pow.Tensor_Scalar %631, %int2_478 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %632, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_479 = torch.constant.int -1 | |
%633 = torch.prim.ListConstruct %int-1_479 : (!torch.int) -> !torch.list<int> | |
%true_480 = torch.constant.bool true | |
%none_481 = torch.constant.none | |
%634 = torch.aten.mean.dim %632, %633, %true_480, %none_481 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %634, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_482 = torch.constant.float 9.9999999747524271E-7 | |
%int1_483 = torch.constant.int 1 | |
%635 = torch.aten.add.Scalar %634, %float9.999990e-07_482, %int1_483 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %635, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%636 = torch.aten.rsqrt %635 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %636, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%637 = torch.aten.mul.Tensor %631, %636 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %637, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%638 = torch.aten.mul.Tensor %15, %637 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %638, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_484 = torch.constant.int 5 | |
%639 = torch.prims.convert_element_type %638, %int5_484 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %639, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
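// SwiGLU feed-forward: silu(x @ gate^T) * (x @ up^T), projected back down; the weights are %16 (ffn_gate, 8640x3200), %17 (ffn_up, 8640x3200) and %18 (ffn_down, 3200x8640). | |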
%int-2_485 = torch.constant.int -2 | |
%int-1_486 = torch.constant.int -1 | |
%640 = torch.aten.transpose.int %16, %int-2_485, %int-1_486 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_487 = torch.constant.int 3200 | |
%641 = torch.prim.ListConstruct %240, %int3200_487 : (!torch.int, !torch.int) -> !torch.list<int> | |
%642 = torch.aten.view %639, %641 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %642, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%643 = torch.aten.mm %642, %640 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %643, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_488 = torch.constant.int 1 | |
%int8640_489 = torch.constant.int 8640 | |
%644 = torch.prim.ListConstruct %int1_488, %240, %int8640_489 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%645 = torch.aten.view %643, %644 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %645, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%646 = torch.aten.silu %645 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %646, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_490 = torch.constant.int -2 | |
%int-1_491 = torch.constant.int -1 | |
%647 = torch.aten.transpose.int %17, %int-2_490, %int-1_491 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_492 = torch.constant.int 3200 | |
%648 = torch.prim.ListConstruct %240, %int3200_492 : (!torch.int, !torch.int) -> !torch.list<int> | |
%649 = torch.aten.view %639, %648 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %649, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%650 = torch.aten.mm %649, %647 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %650, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_493 = torch.constant.int 1 | |
%int8640_494 = torch.constant.int 8640 | |
%651 = torch.prim.ListConstruct %int1_493, %240, %int8640_494 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%652 = torch.aten.view %650, %651 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %652, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%653 = torch.aten.mul.Tensor %646, %652 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %653, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_495 = torch.constant.int -2 | |
%int-1_496 = torch.constant.int -1 | |
%654 = torch.aten.transpose.int %18, %int-2_495, %int-1_496 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_497 = torch.constant.int 1 | |
%655 = torch.aten.size.int %645, %int1_497 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_498 = torch.constant.int 8640 | |
%656 = torch.prim.ListConstruct %655, %int8640_498 : (!torch.int, !torch.int) -> !torch.list<int> | |
%657 = torch.aten.view %653, %656 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %657, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%658 = torch.aten.mm %657, %654 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %658, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_499 = torch.constant.int 1 | |
%int3200_500 = torch.constant.int 3200 | |
%659 = torch.prim.ListConstruct %int1_499, %655, %int3200_500 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%660 = torch.aten.view %658, %659 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %660, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_501 = torch.constant.int 1 | |
%661 = torch.aten.add.Tensor %630, %660, %int1_501 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %661, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
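// The second residual (%661) appears to close this decoder layer; the next layer begins with its attn_norm RMSNorm (weight %19) followed by the Q/K/V projections (weights %20, %21, %22). | |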
%int6_502 = torch.constant.int 6 | |
%662 = torch.prims.convert_element_type %661, %int6_502 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %662, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_503 = torch.constant.int 2 | |
%663 = torch.aten.pow.Tensor_Scalar %662, %int2_503 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %663, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_504 = torch.constant.int -1 | |
%664 = torch.prim.ListConstruct %int-1_504 : (!torch.int) -> !torch.list<int> | |
%true_505 = torch.constant.bool true | |
%none_506 = torch.constant.none | |
%665 = torch.aten.mean.dim %663, %664, %true_505, %none_506 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %665, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_507 = torch.constant.float 9.9999999747524271E-7 | |
%int1_508 = torch.constant.int 1 | |
%666 = torch.aten.add.Scalar %665, %float9.999990e-07_507, %int1_508 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %666, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%667 = torch.aten.rsqrt %666 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %667, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%668 = torch.aten.mul.Tensor %662, %667 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %668, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%669 = torch.aten.mul.Tensor %19, %668 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %669, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_509 = torch.constant.int 5 | |
%670 = torch.prims.convert_element_type %669, %int5_509 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %670, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_510 = torch.constant.int -2 | |
%int-1_511 = torch.constant.int -1 | |
%671 = torch.aten.transpose.int %20, %int-2_510, %int-1_511 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_512 = torch.constant.int 3200 | |
%672 = torch.prim.ListConstruct %240, %int3200_512 : (!torch.int, !torch.int) -> !torch.list<int> | |
%673 = torch.aten.view %670, %672 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %673, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%674 = torch.aten.mm %673, %671 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %674, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_513 = torch.constant.int 1 | |
%int3200_514 = torch.constant.int 3200 | |
%675 = torch.prim.ListConstruct %int1_513, %240, %int3200_514 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%676 = torch.aten.view %674, %675 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %676, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_515 = torch.constant.int -2 | |
%int-1_516 = torch.constant.int -1 | |
%677 = torch.aten.transpose.int %21, %int-2_515, %int-1_516 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_517 = torch.constant.int 3200 | |
%678 = torch.prim.ListConstruct %240, %int3200_517 : (!torch.int, !torch.int) -> !torch.list<int> | |
%679 = torch.aten.view %670, %678 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %679, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%680 = torch.aten.mm %679, %677 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %680, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_518 = torch.constant.int 1 | |
%int3200_519 = torch.constant.int 3200 | |
%681 = torch.prim.ListConstruct %int1_518, %240, %int3200_519 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%682 = torch.aten.view %680, %681 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %682, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_520 = torch.constant.int -2 | |
%int-1_521 = torch.constant.int -1 | |
%683 = torch.aten.transpose.int %22, %int-2_520, %int-1_521 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_522 = torch.constant.int 3200 | |
%684 = torch.prim.ListConstruct %240, %int3200_522 : (!torch.int, !torch.int) -> !torch.list<int> | |
%685 = torch.aten.view %670, %684 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %685, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%686 = torch.aten.mm %685, %683 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %686, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_523 = torch.constant.int 1 | |
%int3200_524 = torch.constant.int 3200 | |
%687 = torch.prim.ListConstruct %int1_523, %240, %int3200_524 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%688 = torch.aten.view %686, %687 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %688, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_525 = torch.constant.int 1 | |
%int32_526 = torch.constant.int 32 | |
%int100_527 = torch.constant.int 100 | |
%689 = torch.prim.ListConstruct %int1_525, %240, %int32_526, %int100_527 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%690 = torch.aten.view %676, %689 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %690, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_528 = torch.constant.int 1 | |
%int32_529 = torch.constant.int 32 | |
%int100_530 = torch.constant.int 100 | |
%691 = torch.prim.ListConstruct %int1_528, %240, %int32_529, %int100_530 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%692 = torch.aten.view %682, %691 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %692, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_531 = torch.constant.int 1 | |
%int32_532 = torch.constant.int 32 | |
%int100_533 = torch.constant.int 100 | |
%693 = torch.prim.ListConstruct %int1_531, %240, %int32_532, %int100_533 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%694 = torch.aten.view %688, %693 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %694, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
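// Rotary position embeddings (RoPE): inv_freq[i] = 1 / 10000^(2i/100) for i in [0, 50), angles = positions (0..2047) outer inv_freq, combined as cos + i*sin into a [2048, 50] complex table, then sliced to the current sequence length. | |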
%int2048_534 = torch.constant.int 2048 | |
%none_535 = torch.constant.none | |
%none_536 = torch.constant.none | |
%cpu_537 = torch.constant.device "cpu" | |
%false_538 = torch.constant.bool false | |
%695 = torch.aten.arange %int2048_534, %none_535, %none_536, %cpu_537, %false_538 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_539 = torch.constant.int 0 | |
%int100_540 = torch.constant.int 100 | |
%int2_541 = torch.constant.int 2 | |
%none_542 = torch.constant.none | |
%none_543 = torch.constant.none | |
%cpu_544 = torch.constant.device "cpu" | |
%false_545 = torch.constant.bool false | |
%696 = torch.aten.arange.start_step %int0_539, %int100_540, %int2_541, %none_542, %none_543, %cpu_544, %false_545 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_546 = torch.constant.int 0 | |
%int0_547 = torch.constant.int 0 | |
%int50_548 = torch.constant.int 50 | |
%int1_549 = torch.constant.int 1 | |
%697 = torch.aten.slice.Tensor %696, %int0_546, %int0_547, %int50_548, %int1_549 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_550 = torch.constant.int 6 | |
%698 = torch.prims.convert_element_type %697, %int6_550 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_551 = torch.constant.int 100 | |
%699 = torch.aten.div.Scalar %698, %int100_551 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_552 = torch.constant.float 1.000000e+04 | |
%700 = torch.aten.pow.Scalar %float1.000000e04_552, %699 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%701 = torch.aten.reciprocal %700 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_553 = torch.constant.float 1.000000e+00 | |
%702 = torch.aten.mul.Scalar %701, %float1.000000e00_553 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_554 = torch.constant.int 2048 | |
%int1_555 = torch.constant.int 1 | |
%703 = torch.prim.ListConstruct %int2048_554, %int1_555 : (!torch.int, !torch.int) -> !torch.list<int> | |
%704 = torch.aten.view %695, %703 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%705 = torch.aten.mul.Tensor %704, %702 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%706 = torch.aten.cos %705 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%707 = torch.aten.sin %705 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%708 = torch.aten.complex %706, %707 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_556 = torch.constant.int 1 | |
%709 = torch.aten.size.int %676, %int1_556 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_557 = torch.constant.int 0 | |
%710 = torch.aten.add.int %int0_557, %709 : !torch.int, !torch.int -> !torch.int | |
%int0_558 = torch.constant.int 0 | |
%int0_559 = torch.constant.int 0 | |
%int1_560 = torch.constant.int 1 | |
%711 = torch.aten.slice.Tensor %708, %int0_558, %int0_559, %710, %int1_560 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %711, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_561 = torch.constant.int 1 | |
%int0_562 = torch.constant.int 0 | |
%int9223372036854775807_563 = torch.constant.int 9223372036854775807 | |
%int1_564 = torch.constant.int 1 | |
%712 = torch.aten.slice.Tensor %711, %int1_561, %int0_562, %int9223372036854775807_563, %int1_564 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %712, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_565 = torch.constant.int 0 | |
%713 = torch.aten.unsqueeze %712, %int0_565 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %713, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_566 = torch.constant.int 2 | |
%714 = torch.aten.unsqueeze %713, %int2_566 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %714, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_567 = torch.constant.int 3 | |
%int0_568 = torch.constant.int 0 | |
%int9223372036854775807_569 = torch.constant.int 9223372036854775807 | |
%int1_570 = torch.constant.int 1 | |
%715 = torch.aten.slice.Tensor %714, %int3_567, %int0_568, %int9223372036854775807_569, %int1_570 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %715, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
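// The rotation itself: Q is bitcast from [1, seq, 32, 100] f16 to [1, seq, 32, 50] complex<f16>, multiplied elementwise by the complex table, and bitcast back to floats, which is equivalent to rotating each (even, odd) lane pair. | |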
%716 = torch_c.to_builtin_tensor %690 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_571 = arith.constant 1 : index | |
%dim_572 = tensor.dim %716, %c1_571 : tensor<1x?x32x100xf16> | |
%717 = flow.tensor.bitcast %716 : tensor<1x?x32x100xf16>{%dim_572} -> tensor<1x?x32x50xcomplex<f16>>{%dim_572} | |
%718 = torch_c.from_builtin_tensor %717 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %718, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%719 = torch.aten.mul.Tensor %718, %715 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %719, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%720 = torch_c.to_builtin_tensor %719 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_573 = arith.constant 1 : index | |
%dim_574 = tensor.dim %720, %c1_573 : tensor<1x?x32x50xcomplex<f32>> | |
%721 = flow.tensor.bitcast %720 : tensor<1x?x32x50xcomplex<f32>>{%dim_574} -> tensor<1x?x32x100xf32>{%dim_574} | |
%722 = torch_c.from_builtin_tensor %721 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %722, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_575 = torch.constant.int 5 | |
%723 = torch.prims.convert_element_type %722, %int5_575 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %723, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
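// The same 2048x50 frequency table is recomputed from scratch for K (%724..%744); the arithmetic is identical to the Q path above and looks like a candidate for CSE later in the pipeline. | |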
%int2048_576 = torch.constant.int 2048 | |
%none_577 = torch.constant.none | |
%none_578 = torch.constant.none | |
%cpu_579 = torch.constant.device "cpu" | |
%false_580 = torch.constant.bool false | |
%724 = torch.aten.arange %int2048_576, %none_577, %none_578, %cpu_579, %false_580 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_581 = torch.constant.int 0 | |
%int100_582 = torch.constant.int 100 | |
%int2_583 = torch.constant.int 2 | |
%none_584 = torch.constant.none | |
%none_585 = torch.constant.none | |
%cpu_586 = torch.constant.device "cpu" | |
%false_587 = torch.constant.bool false | |
%725 = torch.aten.arange.start_step %int0_581, %int100_582, %int2_583, %none_584, %none_585, %cpu_586, %false_587 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_588 = torch.constant.int 0 | |
%int0_589 = torch.constant.int 0 | |
%int50_590 = torch.constant.int 50 | |
%int1_591 = torch.constant.int 1 | |
%726 = torch.aten.slice.Tensor %725, %int0_588, %int0_589, %int50_590, %int1_591 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_592 = torch.constant.int 6 | |
%727 = torch.prims.convert_element_type %726, %int6_592 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_593 = torch.constant.int 100 | |
%728 = torch.aten.div.Scalar %727, %int100_593 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_594 = torch.constant.float 1.000000e+04 | |
%729 = torch.aten.pow.Scalar %float1.000000e04_594, %728 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%730 = torch.aten.reciprocal %729 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_595 = torch.constant.float 1.000000e+00 | |
%731 = torch.aten.mul.Scalar %730, %float1.000000e00_595 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_596 = torch.constant.int 2048 | |
%int1_597 = torch.constant.int 1 | |
%732 = torch.prim.ListConstruct %int2048_596, %int1_597 : (!torch.int, !torch.int) -> !torch.list<int> | |
%733 = torch.aten.view %724, %732 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%734 = torch.aten.mul.Tensor %733, %731 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%735 = torch.aten.cos %734 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%736 = torch.aten.sin %734 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%737 = torch.aten.complex %735, %736 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_598 = torch.constant.int 1 | |
%738 = torch.aten.size.int %682, %int1_598 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_599 = torch.constant.int 0 | |
%739 = torch.aten.add.int %int0_599, %738 : !torch.int, !torch.int -> !torch.int | |
%int0_600 = torch.constant.int 0 | |
%int0_601 = torch.constant.int 0 | |
%int1_602 = torch.constant.int 1 | |
%740 = torch.aten.slice.Tensor %737, %int0_600, %int0_601, %739, %int1_602 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %740, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_603 = torch.constant.int 1 | |
%int0_604 = torch.constant.int 0 | |
%int9223372036854775807_605 = torch.constant.int 9223372036854775807 | |
%int1_606 = torch.constant.int 1 | |
%741 = torch.aten.slice.Tensor %740, %int1_603, %int0_604, %int9223372036854775807_605, %int1_606 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %741, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_607 = torch.constant.int 0 | |
%742 = torch.aten.unsqueeze %741, %int0_607 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %742, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_608 = torch.constant.int 2 | |
%743 = torch.aten.unsqueeze %742, %int2_608 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %743, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_609 = torch.constant.int 3 | |
%int0_610 = torch.constant.int 0 | |
%int9223372036854775807_611 = torch.constant.int 9223372036854775807 | |
%int1_612 = torch.constant.int 1 | |
%744 = torch.aten.slice.Tensor %743, %int3_609, %int0_610, %int9223372036854775807_611, %int1_612 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %744, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%745 = torch_c.to_builtin_tensor %692 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_613 = arith.constant 1 : index | |
%dim_614 = tensor.dim %745, %c1_613 : tensor<1x?x32x100xf16> | |
%746 = flow.tensor.bitcast %745 : tensor<1x?x32x100xf16>{%dim_614} -> tensor<1x?x32x50xcomplex<f16>>{%dim_614} | |
%747 = torch_c.from_builtin_tensor %746 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %747, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%748 = torch.aten.mul.Tensor %747, %744 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %748, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%749 = torch_c.to_builtin_tensor %748 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_615 = arith.constant 1 : index | |
%dim_616 = tensor.dim %749, %c1_615 : tensor<1x?x32x50xcomplex<f32>> | |
%750 = flow.tensor.bitcast %749 : tensor<1x?x32x50xcomplex<f32>>{%dim_616} -> tensor<1x?x32x100xf32>{%dim_616} | |
%751 = torch_c.from_builtin_tensor %750 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %751, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_617 = torch.constant.int 5 | |
%752 = torch.prims.convert_element_type %751, %int5_617 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %752, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
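// KV-cache write for this layer: rotated K (%752) and V (%694) go into slots page * 52 + 4 and page * 52 + 5, consistent with slot = page * 52 + 2 * layer + kv for layer index 2 (26 layers x 2 = 52 sub-slots per 16-token page). | |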
%int52_618 = torch.constant.int 52 | |
%753 = torch.aten.mul.Scalar %arg2, %int52_618 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %753, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int4 = torch.constant.int 4 | |
%int1_619 = torch.constant.int 1 | |
%754 = torch.aten.add.Scalar %753, %int4, %int1_619 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %754, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_620 = torch.constant.int 1 | |
%int16_621 = torch.constant.int 16 | |
%int32_622 = torch.constant.int 32 | |
%int100_623 = torch.constant.int 100 | |
%755 = torch.prim.ListConstruct %int1_620, %368, %int16_621, %int32_622, %int100_623 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%756 = torch.aten.view %752, %755 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %756, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_624 = torch.constant.int 16 | |
%int32_625 = torch.constant.int 32 | |
%int100_626 = torch.constant.int 100 | |
%757 = torch.prim.ListConstruct %368, %int16_624, %int32_625, %int100_626 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%758 = torch.aten.view %756, %757 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %758, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%759 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%760 = torch.aten.view %754, %759 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %760, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_627 = torch.constant.int 1 | |
%int16_628 = torch.constant.int 16 | |
%int32_629 = torch.constant.int 32 | |
%int100_630 = torch.constant.int 100 | |
%761 = torch.prim.ListConstruct %int1_627, %368, %int16_628, %int32_629, %int100_630 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%762 = torch.aten.view %694, %761 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %762, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_631 = torch.constant.int 16 | |
%int32_632 = torch.constant.int 32 | |
%int100_633 = torch.constant.int 100 | |
%763 = torch.prim.ListConstruct %368, %int16_631, %int32_632, %int100_633 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%764 = torch.aten.view %762, %763 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %764, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_634 = torch.constant.int 1 | |
%int1_635 = torch.constant.int 1 | |
%765 = torch.aten.add.Scalar %754, %int1_634, %int1_635 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %765, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%766 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%767 = torch.aten.view %765, %766 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %767, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%768 = torch.prim.ListConstruct %760, %767 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_636 = torch.constant.int 0 | |
%769 = torch.aten.cat %768, %int0_636 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %769, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%770 = torch.prim.ListConstruct %758, %764 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_637 = torch.constant.int 0 | |
%771 = torch.aten.cat %770, %int0_637 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %771, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_638 = torch.constant.int 26 | |
%int2_639 = torch.constant.int 2 | |
%int16_640 = torch.constant.int 16 | |
%int32_641 = torch.constant.int 32 | |
%int100_642 = torch.constant.int 100 | |
%772 = torch.prim.ListConstruct %359, %int26_638, %int2_639, %int16_640, %int32_641, %int100_642 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%773 = torch.aten.view %587, %772 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %773, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_643 = torch.constant.int 26 | |
%774 = torch.aten.mul.int %359, %int26_643 : !torch.int, !torch.int -> !torch.int | |
%int2_644 = torch.constant.int 2 | |
%775 = torch.aten.mul.int %774, %int2_644 : !torch.int, !torch.int -> !torch.int | |
%int16_645 = torch.constant.int 16 | |
%int32_646 = torch.constant.int 32 | |
%int100_647 = torch.constant.int 100 | |
%776 = torch.prim.ListConstruct %775, %int16_645, %int32_646, %int100_647 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%777 = torch.aten.view %773, %776 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %777, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%778 = torch.prim.ListConstruct %769 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_648 = torch.constant.bool false | |
%779 = torch.aten.index_put %777, %778, %771, %false_648 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %779, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_649 = torch.constant.int 26 | |
%int2_650 = torch.constant.int 2 | |
%int16_651 = torch.constant.int 16 | |
%int32_652 = torch.constant.int 32 | |
%int100_653 = torch.constant.int 100 | |
%780 = torch.prim.ListConstruct %359, %int26_649, %int2_650, %int16_651, %int32_652, %int100_653 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%781 = torch.aten.view %779, %780 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %781, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_654 = torch.constant.int 2662400 | |
%782 = torch.prim.ListConstruct %359, %int2662400_654 : (!torch.int, !torch.int) -> !torch.list<int> | |
%783 = torch.aten.view %781, %782 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %783, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
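// Attention for this layer repeats the pattern above: transpose to [1, 32, seq, 100], batched Q @ K^T, divide by 10, add mask %266, softmax in f32, then a batched matmul with V and a transpose back to [1, seq, 32, 100]. | |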
%int1_655 = torch.constant.int 1 | |
%int2_656 = torch.constant.int 2 | |
%784 = torch.aten.transpose.int %723, %int1_655, %int2_656 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %784, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_657 = torch.constant.int 1 | |
%int2_658 = torch.constant.int 2 | |
%785 = torch.aten.transpose.int %752, %int1_657, %int2_658 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %785, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_659 = torch.constant.int 1 | |
%int2_660 = torch.constant.int 2 | |
%786 = torch.aten.transpose.int %694, %int1_659, %int2_660 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %786, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_661 = torch.constant.int 2 | |
%int3_662 = torch.constant.int 3 | |
%787 = torch.aten.transpose.int %785, %int2_661, %int3_662 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %787, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_663 = torch.constant.int 1 | |
%int32_664 = torch.constant.int 32 | |
%int100_665 = torch.constant.int 100 | |
%788 = torch.prim.ListConstruct %int1_663, %int32_664, %709, %int100_665 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_666 = torch.constant.bool false | |
%789 = torch.aten.expand %784, %788, %false_666 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %789, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_667 = torch.constant.int 32 | |
%int100_668 = torch.constant.int 100 | |
%790 = torch.prim.ListConstruct %int32_667, %709, %int100_668 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%791 = torch.aten.view %789, %790 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %791, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_669 = torch.constant.int 1 | |
%int32_670 = torch.constant.int 32 | |
%int100_671 = torch.constant.int 100 | |
%792 = torch.prim.ListConstruct %int1_669, %int32_670, %int100_671, %738 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_672 = torch.constant.bool false | |
%793 = torch.aten.expand %787, %792, %false_672 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %793, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_673 = torch.constant.int 32 | |
%int100_674 = torch.constant.int 100 | |
%794 = torch.prim.ListConstruct %int32_673, %int100_674, %738 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%795 = torch.aten.view %793, %794 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %795, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%796 = torch.aten.bmm %791, %795 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %796, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_675 = torch.constant.int 1 | |
%int32_676 = torch.constant.int 32 | |
%797 = torch.prim.ListConstruct %int1_675, %int32_676, %709, %738 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%798 = torch.aten.view %796, %797 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %798, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%float1.000000e01_677 = torch.constant.float 1.000000e+01 | |
%799 = torch.aten.div.Scalar %798, %float1.000000e01_677 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %799, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_678 = torch.constant.int 1 | |
%800 = torch.aten.add.Tensor %799, %266, %int1_678 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %800, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_679 = torch.constant.int 6 | |
%801 = torch.prims.convert_element_type %800, %int6_679 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %801, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_680 = torch.constant.int -1 | |
%false_681 = torch.constant.bool false | |
%802 = torch.aten._softmax %801, %int-1_680, %false_681 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %802, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_682 = torch.constant.int 5 | |
%803 = torch.prims.convert_element_type %802, %int5_682 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %803, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_683 = torch.constant.int 1 | |
%int32_684 = torch.constant.int 32 | |
%804 = torch.prim.ListConstruct %int1_683, %int32_684, %709, %738 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_685 = torch.constant.bool false | |
%805 = torch.aten.expand %803, %804, %false_685 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %805, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_686 = torch.constant.int 32 | |
%806 = torch.prim.ListConstruct %int32_686, %709, %738 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%807 = torch.aten.view %805, %806 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %807, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
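// Expand/flatten V to [32, seq, 100] and compute the context: probs @ V.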
%int1_687 = torch.constant.int 1 | |
%808 = torch.aten.size.int %688, %int1_687 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_688 = torch.constant.int 1 | |
%int32_689 = torch.constant.int 32 | |
%int100_690 = torch.constant.int 100 | |
%809 = torch.prim.ListConstruct %int1_688, %int32_689, %808, %int100_690 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_691 = torch.constant.bool false | |
%810 = torch.aten.expand %786, %809, %false_691 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %810, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_692 = torch.constant.int 32 | |
%int100_693 = torch.constant.int 100 | |
%811 = torch.prim.ListConstruct %int32_692, %808, %int100_693 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%812 = torch.aten.view %810, %811 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %812, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%813 = torch.aten.bmm %807, %812 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %813, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_694 = torch.constant.int 1 | |
%int32_695 = torch.constant.int 32 | |
%int100_696 = torch.constant.int 100 | |
%814 = torch.prim.ListConstruct %int1_694, %int32_695, %709, %int100_696 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%815 = torch.aten.view %813, %814 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %815, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
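// Transpose the context back to [1, seq, 32, 100], make it contiguous (clone),
// and merge the heads into the hidden size 3200 = 32 * 100.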
%int1_697 = torch.constant.int 1 | |
%int2_698 = torch.constant.int 2 | |
%816 = torch.aten.transpose.int %815, %int1_697, %int2_698 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %816, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_699 = torch.constant.int 0 | |
%817 = torch.aten.clone %816, %int0_699 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %817, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_700 = torch.constant.int 1 | |
%int3200_701 = torch.constant.int 3200 | |
%818 = torch.prim.ListConstruct %int1_700, %709, %int3200_701 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%819 = torch.aten._unsafe_view %817, %818 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %819, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
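// Attention output projection: flatten to [seq, 3200], multiply by the transposed
// weight %23 (presumably this block's attn_output.weight), reshape, and add the
// residual %661 carried from before the attention sub-layer.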
%int-2_702 = torch.constant.int -2 | |
%int-1_703 = torch.constant.int -1 | |
%820 = torch.aten.transpose.int %23, %int-2_702, %int-1_703 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_704 = torch.constant.int 3200 | |
%821 = torch.prim.ListConstruct %709, %int3200_704 : (!torch.int, !torch.int) -> !torch.list<int> | |
%822 = torch.aten.view %819, %821 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %822, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%823 = torch.aten.mm %822, %820 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %823, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_705 = torch.constant.int 1 | |
%int3200_706 = torch.constant.int 3200 | |
%824 = torch.prim.ListConstruct %int1_705, %709, %int3200_706 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%825 = torch.aten.view %823, %824 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %825, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_707 = torch.constant.int 1 | |
%826 = torch.aten.add.Tensor %661, %825, %int1_707 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %826, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
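// RMSNorm in f32: y = x * rsqrt(mean(x^2, dim=-1) + eps) with eps ~ 1e-6,
// then an elementwise scale by the norm weight %24 (presumably ffn_norm.weight).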
%int6_708 = torch.constant.int 6 | |
%827 = torch.prims.convert_element_type %826, %int6_708 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %827, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_709 = torch.constant.int 2 | |
%828 = torch.aten.pow.Tensor_Scalar %827, %int2_709 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %828, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_710 = torch.constant.int -1 | |
%829 = torch.prim.ListConstruct %int-1_710 : (!torch.int) -> !torch.list<int> | |
%true_711 = torch.constant.bool true | |
%none_712 = torch.constant.none | |
%830 = torch.aten.mean.dim %828, %829, %true_711, %none_712 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %830, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_713 = torch.constant.float 9.9999999747524271E-7 | |
%int1_714 = torch.constant.int 1 | |
%831 = torch.aten.add.Scalar %830, %float9.999990e-07_713, %int1_714 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %831, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%832 = torch.aten.rsqrt %831 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %832, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%833 = torch.aten.mul.Tensor %827, %832 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %833, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%834 = torch.aten.mul.Tensor %24, %833 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %834, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_715 = torch.constant.int 5 | |
%835 = torch.prims.convert_element_type %834, %int5_715 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %835, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
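// SwiGLU feed-forward, first half: gate = silu(x @ W_gate^T) and up = x @ W_up^T,
// using the 8640x3200 weights %25 and %26 (presumably ffn_gate / ffn_up).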
%int-2_716 = torch.constant.int -2 | |
%int-1_717 = torch.constant.int -1 | |
%836 = torch.aten.transpose.int %25, %int-2_716, %int-1_717 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_718 = torch.constant.int 3200 | |
%837 = torch.prim.ListConstruct %240, %int3200_718 : (!torch.int, !torch.int) -> !torch.list<int> | |
%838 = torch.aten.view %835, %837 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %838, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%839 = torch.aten.mm %838, %836 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %839, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_719 = torch.constant.int 1 | |
%int8640_720 = torch.constant.int 8640 | |
%840 = torch.prim.ListConstruct %int1_719, %240, %int8640_720 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%841 = torch.aten.view %839, %840 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %841, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%842 = torch.aten.silu %841 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %842, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_721 = torch.constant.int -2 | |
%int-1_722 = torch.constant.int -1 | |
%843 = torch.aten.transpose.int %26, %int-2_721, %int-1_722 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_723 = torch.constant.int 3200 | |
%844 = torch.prim.ListConstruct %240, %int3200_723 : (!torch.int, !torch.int) -> !torch.list<int> | |
%845 = torch.aten.view %835, %844 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %845, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%846 = torch.aten.mm %845, %843 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %846, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_724 = torch.constant.int 1 | |
%int8640_725 = torch.constant.int 8640 | |
%847 = torch.prim.ListConstruct %int1_724, %240, %int8640_725 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%848 = torch.aten.view %846, %847 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %848, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%849 = torch.aten.mul.Tensor %842, %848 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %849, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
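// Down-projection of gate * up back to 3200 via %27 (presumably ffn_down),
// followed by the residual add with %826.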
%int-2_726 = torch.constant.int -2 | |
%int-1_727 = torch.constant.int -1 | |
%850 = torch.aten.transpose.int %27, %int-2_726, %int-1_727 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_728 = torch.constant.int 1 | |
%851 = torch.aten.size.int %841, %int1_728 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_729 = torch.constant.int 8640 | |
%852 = torch.prim.ListConstruct %851, %int8640_729 : (!torch.int, !torch.int) -> !torch.list<int> | |
%853 = torch.aten.view %849, %852 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %853, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%854 = torch.aten.mm %853, %850 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %854, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_730 = torch.constant.int 1 | |
%int3200_731 = torch.constant.int 3200 | |
%855 = torch.prim.ListConstruct %int1_730, %851, %int3200_731 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%856 = torch.aten.view %854, %855 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %856, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_732 = torch.constant.int 1 | |
%857 = torch.aten.add.Tensor %826, %856, %int1_732 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %857, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
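// End of this decoder block. The same pattern repeats below for the next layer:
// RMSNorm (%28), Q/K/V projections (%29, %30, %31), RoPE, KV-cache update,
// masked attention, and the SwiGLU FFN.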
%int6_733 = torch.constant.int 6 | |
%858 = torch.prims.convert_element_type %857, %int6_733 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %858, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_734 = torch.constant.int 2 | |
%859 = torch.aten.pow.Tensor_Scalar %858, %int2_734 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %859, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_735 = torch.constant.int -1 | |
%860 = torch.prim.ListConstruct %int-1_735 : (!torch.int) -> !torch.list<int> | |
%true_736 = torch.constant.bool true | |
%none_737 = torch.constant.none | |
%861 = torch.aten.mean.dim %859, %860, %true_736, %none_737 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %861, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_738 = torch.constant.float 9.9999999747524271E-7 | |
%int1_739 = torch.constant.int 1 | |
%862 = torch.aten.add.Scalar %861, %float9.999990e-07_738, %int1_739 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %862, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%863 = torch.aten.rsqrt %862 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %863, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%864 = torch.aten.mul.Tensor %858, %863 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %864, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%865 = torch.aten.mul.Tensor %28, %864 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %865, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_740 = torch.constant.int 5 | |
%866 = torch.prims.convert_element_type %865, %int5_740 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %866, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_741 = torch.constant.int -2 | |
%int-1_742 = torch.constant.int -1 | |
%867 = torch.aten.transpose.int %29, %int-2_741, %int-1_742 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_743 = torch.constant.int 3200 | |
%868 = torch.prim.ListConstruct %240, %int3200_743 : (!torch.int, !torch.int) -> !torch.list<int> | |
%869 = torch.aten.view %866, %868 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %869, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%870 = torch.aten.mm %869, %867 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %870, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_744 = torch.constant.int 1 | |
%int3200_745 = torch.constant.int 3200 | |
%871 = torch.prim.ListConstruct %int1_744, %240, %int3200_745 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%872 = torch.aten.view %870, %871 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %872, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_746 = torch.constant.int -2 | |
%int-1_747 = torch.constant.int -1 | |
%873 = torch.aten.transpose.int %30, %int-2_746, %int-1_747 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_748 = torch.constant.int 3200 | |
%874 = torch.prim.ListConstruct %240, %int3200_748 : (!torch.int, !torch.int) -> !torch.list<int> | |
%875 = torch.aten.view %866, %874 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %875, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%876 = torch.aten.mm %875, %873 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %876, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_749 = torch.constant.int 1 | |
%int3200_750 = torch.constant.int 3200 | |
%877 = torch.prim.ListConstruct %int1_749, %240, %int3200_750 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%878 = torch.aten.view %876, %877 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %878, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_751 = torch.constant.int -2 | |
%int-1_752 = torch.constant.int -1 | |
%879 = torch.aten.transpose.int %31, %int-2_751, %int-1_752 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_753 = torch.constant.int 3200 | |
%880 = torch.prim.ListConstruct %240, %int3200_753 : (!torch.int, !torch.int) -> !torch.list<int> | |
%881 = torch.aten.view %866, %880 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %881, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%882 = torch.aten.mm %881, %879 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %882, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_754 = torch.constant.int 1 | |
%int3200_755 = torch.constant.int 3200 | |
%883 = torch.prim.ListConstruct %int1_754, %240, %int3200_755 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%884 = torch.aten.view %882, %883 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %884, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_756 = torch.constant.int 1 | |
%int32_757 = torch.constant.int 32 | |
%int100_758 = torch.constant.int 100 | |
%885 = torch.prim.ListConstruct %int1_756, %240, %int32_757, %int100_758 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%886 = torch.aten.view %872, %885 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %886, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_759 = torch.constant.int 1 | |
%int32_760 = torch.constant.int 32 | |
%int100_761 = torch.constant.int 100 | |
%887 = torch.prim.ListConstruct %int1_759, %240, %int32_760, %int100_761 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%888 = torch.aten.view %878, %887 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %888, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_762 = torch.constant.int 1 | |
%int32_763 = torch.constant.int 32 | |
%int100_764 = torch.constant.int 100 | |
%889 = torch.prim.ListConstruct %int1_762, %240, %int32_763, %int100_764 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%890 = torch.aten.view %884, %889 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %890, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
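// Rotary position embedding (RoPE) table: inv_freq = 1 / 10000^(arange(0, 100, 2) / 100)
// gives 50 frequencies for head_dim 100; angles = positions[:, None] * inv_freq over a
// 2048-token window, stored as cos + i*sin in complex<f32>.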
%int2048_765 = torch.constant.int 2048 | |
%none_766 = torch.constant.none | |
%none_767 = torch.constant.none | |
%cpu_768 = torch.constant.device "cpu" | |
%false_769 = torch.constant.bool false | |
%891 = torch.aten.arange %int2048_765, %none_766, %none_767, %cpu_768, %false_769 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_770 = torch.constant.int 0 | |
%int100_771 = torch.constant.int 100 | |
%int2_772 = torch.constant.int 2 | |
%none_773 = torch.constant.none | |
%none_774 = torch.constant.none | |
%cpu_775 = torch.constant.device "cpu" | |
%false_776 = torch.constant.bool false | |
%892 = torch.aten.arange.start_step %int0_770, %int100_771, %int2_772, %none_773, %none_774, %cpu_775, %false_776 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_777 = torch.constant.int 0 | |
%int0_778 = torch.constant.int 0 | |
%int50_779 = torch.constant.int 50 | |
%int1_780 = torch.constant.int 1 | |
%893 = torch.aten.slice.Tensor %892, %int0_777, %int0_778, %int50_779, %int1_780 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_781 = torch.constant.int 6 | |
%894 = torch.prims.convert_element_type %893, %int6_781 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_782 = torch.constant.int 100 | |
%895 = torch.aten.div.Scalar %894, %int100_782 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_783 = torch.constant.float 1.000000e+04 | |
%896 = torch.aten.pow.Scalar %float1.000000e04_783, %895 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%897 = torch.aten.reciprocal %896 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_784 = torch.constant.float 1.000000e+00 | |
%898 = torch.aten.mul.Scalar %897, %float1.000000e00_784 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_785 = torch.constant.int 2048 | |
%int1_786 = torch.constant.int 1 | |
%899 = torch.prim.ListConstruct %int2048_785, %int1_786 : (!torch.int, !torch.int) -> !torch.list<int> | |
%900 = torch.aten.view %891, %899 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%901 = torch.aten.mul.Tensor %900, %898 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%902 = torch.aten.cos %901 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%903 = torch.aten.sin %901 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%904 = torch.aten.complex %902, %903 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_787 = torch.constant.int 1 | |
%905 = torch.aten.size.int %872, %int1_787 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_788 = torch.constant.int 0 | |
%906 = torch.aten.add.int %int0_788, %905 : !torch.int, !torch.int -> !torch.int | |
%int0_789 = torch.constant.int 0 | |
%int0_790 = torch.constant.int 0 | |
%int1_791 = torch.constant.int 1 | |
%907 = torch.aten.slice.Tensor %904, %int0_789, %int0_790, %906, %int1_791 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %907, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_792 = torch.constant.int 1 | |
%int0_793 = torch.constant.int 0 | |
%int9223372036854775807_794 = torch.constant.int 9223372036854775807 | |
%int1_795 = torch.constant.int 1 | |
%908 = torch.aten.slice.Tensor %907, %int1_792, %int0_793, %int9223372036854775807_794, %int1_795 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %908, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_796 = torch.constant.int 0 | |
%909 = torch.aten.unsqueeze %908, %int0_796 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %909, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_797 = torch.constant.int 2 | |
%910 = torch.aten.unsqueeze %909, %int2_797 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %910, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_798 = torch.constant.int 3 | |
%int0_799 = torch.constant.int 0 | |
%int9223372036854775807_800 = torch.constant.int 9223372036854775807 | |
%int1_801 = torch.constant.int 1 | |
%911 = torch.aten.slice.Tensor %910, %int3_798, %int0_799, %int9223372036854775807_800, %int1_801 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %911, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
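// Apply RoPE to Q: bitcast adjacent f16 pairs to complex<f16>, multiply by the
// sliced complex rotation %911 (broadcast over the 32 heads), then bitcast back
// to interleaved real form and cast to f16.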
%912 = torch_c.to_builtin_tensor %886 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_802 = arith.constant 1 : index | |
%dim_803 = tensor.dim %912, %c1_802 : tensor<1x?x32x100xf16> | |
%913 = flow.tensor.bitcast %912 : tensor<1x?x32x100xf16>{%dim_803} -> tensor<1x?x32x50xcomplex<f16>>{%dim_803} | |
%914 = torch_c.from_builtin_tensor %913 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %914, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%915 = torch.aten.mul.Tensor %914, %911 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %915, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%916 = torch_c.to_builtin_tensor %915 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_804 = arith.constant 1 : index | |
%dim_805 = tensor.dim %916, %c1_804 : tensor<1x?x32x50xcomplex<f32>> | |
%917 = flow.tensor.bitcast %916 : tensor<1x?x32x50xcomplex<f32>>{%dim_805} -> tensor<1x?x32x100xf32>{%dim_805} | |
%918 = torch_c.from_builtin_tensor %917 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %918, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_806 = torch.constant.int 5 | |
%919 = torch.prims.convert_element_type %918, %int5_806 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %919, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
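// The identical RoPE table is rebuilt and applied to K below (%888 -> %948).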
%int2048_807 = torch.constant.int 2048 | |
%none_808 = torch.constant.none | |
%none_809 = torch.constant.none | |
%cpu_810 = torch.constant.device "cpu" | |
%false_811 = torch.constant.bool false | |
%920 = torch.aten.arange %int2048_807, %none_808, %none_809, %cpu_810, %false_811 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_812 = torch.constant.int 0 | |
%int100_813 = torch.constant.int 100 | |
%int2_814 = torch.constant.int 2 | |
%none_815 = torch.constant.none | |
%none_816 = torch.constant.none | |
%cpu_817 = torch.constant.device "cpu" | |
%false_818 = torch.constant.bool false | |
%921 = torch.aten.arange.start_step %int0_812, %int100_813, %int2_814, %none_815, %none_816, %cpu_817, %false_818 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_819 = torch.constant.int 0 | |
%int0_820 = torch.constant.int 0 | |
%int50_821 = torch.constant.int 50 | |
%int1_822 = torch.constant.int 1 | |
%922 = torch.aten.slice.Tensor %921, %int0_819, %int0_820, %int50_821, %int1_822 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_823 = torch.constant.int 6 | |
%923 = torch.prims.convert_element_type %922, %int6_823 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_824 = torch.constant.int 100 | |
%924 = torch.aten.div.Scalar %923, %int100_824 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_825 = torch.constant.float 1.000000e+04 | |
%925 = torch.aten.pow.Scalar %float1.000000e04_825, %924 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%926 = torch.aten.reciprocal %925 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_826 = torch.constant.float 1.000000e+00 | |
%927 = torch.aten.mul.Scalar %926, %float1.000000e00_826 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_827 = torch.constant.int 2048 | |
%int1_828 = torch.constant.int 1 | |
%928 = torch.prim.ListConstruct %int2048_827, %int1_828 : (!torch.int, !torch.int) -> !torch.list<int> | |
%929 = torch.aten.view %920, %928 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%930 = torch.aten.mul.Tensor %929, %927 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%931 = torch.aten.cos %930 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%932 = torch.aten.sin %930 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%933 = torch.aten.complex %931, %932 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_829 = torch.constant.int 1 | |
%934 = torch.aten.size.int %878, %int1_829 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_830 = torch.constant.int 0 | |
%935 = torch.aten.add.int %int0_830, %934 : !torch.int, !torch.int -> !torch.int | |
%int0_831 = torch.constant.int 0 | |
%int0_832 = torch.constant.int 0 | |
%int1_833 = torch.constant.int 1 | |
%936 = torch.aten.slice.Tensor %933, %int0_831, %int0_832, %935, %int1_833 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %936, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_834 = torch.constant.int 1 | |
%int0_835 = torch.constant.int 0 | |
%int9223372036854775807_836 = torch.constant.int 9223372036854775807 | |
%int1_837 = torch.constant.int 1 | |
%937 = torch.aten.slice.Tensor %936, %int1_834, %int0_835, %int9223372036854775807_836, %int1_837 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %937, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_838 = torch.constant.int 0 | |
%938 = torch.aten.unsqueeze %937, %int0_838 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %938, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_839 = torch.constant.int 2 | |
%939 = torch.aten.unsqueeze %938, %int2_839 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %939, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_840 = torch.constant.int 3 | |
%int0_841 = torch.constant.int 0 | |
%int9223372036854775807_842 = torch.constant.int 9223372036854775807 | |
%int1_843 = torch.constant.int 1 | |
%940 = torch.aten.slice.Tensor %939, %int3_840, %int0_841, %int9223372036854775807_842, %int1_843 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %940, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%941 = torch_c.to_builtin_tensor %888 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_844 = arith.constant 1 : index | |
%dim_845 = tensor.dim %941, %c1_844 : tensor<1x?x32x100xf16> | |
%942 = flow.tensor.bitcast %941 : tensor<1x?x32x100xf16>{%dim_845} -> tensor<1x?x32x50xcomplex<f16>>{%dim_845} | |
%943 = torch_c.from_builtin_tensor %942 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %943, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%944 = torch.aten.mul.Tensor %943, %940 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %944, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%945 = torch_c.to_builtin_tensor %944 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_846 = arith.constant 1 : index | |
%dim_847 = tensor.dim %945, %c1_846 : tensor<1x?x32x50xcomplex<f32>> | |
%946 = flow.tensor.bitcast %945 : tensor<1x?x32x50xcomplex<f32>>{%dim_847} -> tensor<1x?x32x100xf32>{%dim_847} | |
%947 = torch_c.from_builtin_tensor %946 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %947, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_848 = torch.constant.int 5 | |
%948 = torch.prims.convert_element_type %947, %int5_848 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %948, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
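// KV-cache write: cache-slot indices are computed as %arg2 * 52 + 6, which is
// consistent with 26 layers x 2 (K, V) slots per page and this being layer 3's
// K slot (editor's guess from the constants); V goes to the next slot (+1).
// K (%948) and V (%890) are reshaped to [slots, 16, 32, 100] and scattered into
// the flattened cache %783 (2662400 = 26 * 2 * 16 * 32 * 100) with index_put,
// then the cache is re-flattened to [?, 2662400].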
%int52_849 = torch.constant.int 52 | |
%949 = torch.aten.mul.Scalar %arg2, %int52_849 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %949, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int6_850 = torch.constant.int 6 | |
%int1_851 = torch.constant.int 1 | |
%950 = torch.aten.add.Scalar %949, %int6_850, %int1_851 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %950, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_852 = torch.constant.int 1 | |
%int16_853 = torch.constant.int 16 | |
%int32_854 = torch.constant.int 32 | |
%int100_855 = torch.constant.int 100 | |
%951 = torch.prim.ListConstruct %int1_852, %368, %int16_853, %int32_854, %int100_855 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%952 = torch.aten.view %948, %951 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %952, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_856 = torch.constant.int 16 | |
%int32_857 = torch.constant.int 32 | |
%int100_858 = torch.constant.int 100 | |
%953 = torch.prim.ListConstruct %368, %int16_856, %int32_857, %int100_858 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%954 = torch.aten.view %952, %953 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %954, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%955 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%956 = torch.aten.view %950, %955 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %956, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_859 = torch.constant.int 1 | |
%int16_860 = torch.constant.int 16 | |
%int32_861 = torch.constant.int 32 | |
%int100_862 = torch.constant.int 100 | |
%957 = torch.prim.ListConstruct %int1_859, %368, %int16_860, %int32_861, %int100_862 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%958 = torch.aten.view %890, %957 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %958, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_863 = torch.constant.int 16 | |
%int32_864 = torch.constant.int 32 | |
%int100_865 = torch.constant.int 100 | |
%959 = torch.prim.ListConstruct %368, %int16_863, %int32_864, %int100_865 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%960 = torch.aten.view %958, %959 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %960, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_866 = torch.constant.int 1 | |
%int1_867 = torch.constant.int 1 | |
%961 = torch.aten.add.Scalar %950, %int1_866, %int1_867 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %961, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%962 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%963 = torch.aten.view %961, %962 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %963, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%964 = torch.prim.ListConstruct %956, %963 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_868 = torch.constant.int 0 | |
%965 = torch.aten.cat %964, %int0_868 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %965, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%966 = torch.prim.ListConstruct %954, %960 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_869 = torch.constant.int 0 | |
%967 = torch.aten.cat %966, %int0_869 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %967, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_870 = torch.constant.int 26 | |
%int2_871 = torch.constant.int 2 | |
%int16_872 = torch.constant.int 16 | |
%int32_873 = torch.constant.int 32 | |
%int100_874 = torch.constant.int 100 | |
%968 = torch.prim.ListConstruct %359, %int26_870, %int2_871, %int16_872, %int32_873, %int100_874 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%969 = torch.aten.view %783, %968 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %969, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_875 = torch.constant.int 26 | |
%970 = torch.aten.mul.int %359, %int26_875 : !torch.int, !torch.int -> !torch.int | |
%int2_876 = torch.constant.int 2 | |
%971 = torch.aten.mul.int %970, %int2_876 : !torch.int, !torch.int -> !torch.int | |
%int16_877 = torch.constant.int 16 | |
%int32_878 = torch.constant.int 32 | |
%int100_879 = torch.constant.int 100 | |
%972 = torch.prim.ListConstruct %971, %int16_877, %int32_878, %int100_879 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%973 = torch.aten.view %969, %972 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %973, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%974 = torch.prim.ListConstruct %965 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_880 = torch.constant.bool false | |
%975 = torch.aten.index_put %973, %974, %967, %false_880 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %975, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_881 = torch.constant.int 26 | |
%int2_882 = torch.constant.int 2 | |
%int16_883 = torch.constant.int 16 | |
%int32_884 = torch.constant.int 32 | |
%int100_885 = torch.constant.int 100 | |
%976 = torch.prim.ListConstruct %359, %int26_881, %int2_882, %int16_883, %int32_884, %int100_885 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%977 = torch.aten.view %975, %976 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %977, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_886 = torch.constant.int 2662400 | |
%978 = torch.prim.ListConstruct %359, %int2662400_886 : (!torch.int, !torch.int) -> !torch.list<int> | |
%979 = torch.aten.view %977, %978 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %979, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
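// Attention for this block, same recipe as above: transpose Q/K/V to
// [1, 32, seq, 100], scores = (Q @ K^T) / 10 + mask %266, f32 softmax,
// then context = probs @ V.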
%int1_887 = torch.constant.int 1 | |
%int2_888 = torch.constant.int 2 | |
%980 = torch.aten.transpose.int %919, %int1_887, %int2_888 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %980, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_889 = torch.constant.int 1 | |
%int2_890 = torch.constant.int 2 | |
%981 = torch.aten.transpose.int %948, %int1_889, %int2_890 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %981, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_891 = torch.constant.int 1 | |
%int2_892 = torch.constant.int 2 | |
%982 = torch.aten.transpose.int %890, %int1_891, %int2_892 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %982, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_893 = torch.constant.int 2 | |
%int3_894 = torch.constant.int 3 | |
%983 = torch.aten.transpose.int %981, %int2_893, %int3_894 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %983, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_895 = torch.constant.int 1 | |
%int32_896 = torch.constant.int 32 | |
%int100_897 = torch.constant.int 100 | |
%984 = torch.prim.ListConstruct %int1_895, %int32_896, %905, %int100_897 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_898 = torch.constant.bool false | |
%985 = torch.aten.expand %980, %984, %false_898 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %985, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_899 = torch.constant.int 32 | |
%int100_900 = torch.constant.int 100 | |
%986 = torch.prim.ListConstruct %int32_899, %905, %int100_900 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%987 = torch.aten.view %985, %986 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %987, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_901 = torch.constant.int 1 | |
%int32_902 = torch.constant.int 32 | |
%int100_903 = torch.constant.int 100 | |
%988 = torch.prim.ListConstruct %int1_901, %int32_902, %int100_903, %934 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_904 = torch.constant.bool false | |
%989 = torch.aten.expand %983, %988, %false_904 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %989, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_905 = torch.constant.int 32 | |
%int100_906 = torch.constant.int 100 | |
%990 = torch.prim.ListConstruct %int32_905, %int100_906, %934 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%991 = torch.aten.view %989, %990 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %991, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%992 = torch.aten.bmm %987, %991 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %992, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_907 = torch.constant.int 1 | |
%int32_908 = torch.constant.int 32 | |
%993 = torch.prim.ListConstruct %int1_907, %int32_908, %905, %934 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%994 = torch.aten.view %992, %993 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %994, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%float1.000000e01_909 = torch.constant.float 1.000000e+01 | |
%995 = torch.aten.div.Scalar %994, %float1.000000e01_909 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %995, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_910 = torch.constant.int 1 | |
%996 = torch.aten.add.Tensor %995, %266, %int1_910 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %996, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_911 = torch.constant.int 6 | |
%997 = torch.prims.convert_element_type %996, %int6_911 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %997, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_912 = torch.constant.int -1 | |
%false_913 = torch.constant.bool false | |
%998 = torch.aten._softmax %997, %int-1_912, %false_913 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %998, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_914 = torch.constant.int 5 | |
%999 = torch.prims.convert_element_type %998, %int5_914 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %999, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_915 = torch.constant.int 1 | |
%int32_916 = torch.constant.int 32 | |
%1000 = torch.prim.ListConstruct %int1_915, %int32_916, %905, %934 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_917 = torch.constant.bool false | |
%1001 = torch.aten.expand %999, %1000, %false_917 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1001, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_918 = torch.constant.int 32 | |
%1002 = torch.prim.ListConstruct %int32_918, %905, %934 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1003 = torch.aten.view %1001, %1002 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1003, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_919 = torch.constant.int 1 | |
%1004 = torch.aten.size.int %884, %int1_919 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_920 = torch.constant.int 1 | |
%int32_921 = torch.constant.int 32 | |
%int100_922 = torch.constant.int 100 | |
%1005 = torch.prim.ListConstruct %int1_920, %int32_921, %1004, %int100_922 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_923 = torch.constant.bool false | |
%1006 = torch.aten.expand %982, %1005, %false_923 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1006, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_924 = torch.constant.int 32 | |
%int100_925 = torch.constant.int 100 | |
%1007 = torch.prim.ListConstruct %int32_924, %1004, %int100_925 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1008 = torch.aten.view %1006, %1007 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1008, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%1009 = torch.aten.bmm %1003, %1008 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1009, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_926 = torch.constant.int 1 | |
%int32_927 = torch.constant.int 32 | |
%int100_928 = torch.constant.int 100 | |
%1010 = torch.prim.ListConstruct %int1_926, %int32_927, %905, %int100_928 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1011 = torch.aten.view %1009, %1010 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1011, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_929 = torch.constant.int 1 | |
%int2_930 = torch.constant.int 2 | |
%1012 = torch.aten.transpose.int %1011, %int1_929, %int2_930 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1012, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_931 = torch.constant.int 0 | |
%1013 = torch.aten.clone %1012, %int0_931 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1013, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_932 = torch.constant.int 1 | |
%int3200_933 = torch.constant.int 3200 | |
%1014 = torch.prim.ListConstruct %int1_932, %905, %int3200_933 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1015 = torch.aten._unsafe_view %1013, %1014 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1015, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
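// Epilogue of the block, mirroring the previous one: output projection %32 and
// residual add, RMSNorm scaled by %33, then the SwiGLU FFN with %34 (gate),
// %35 (up), and %36 (down).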
%int-2_934 = torch.constant.int -2 | |
%int-1_935 = torch.constant.int -1 | |
%1016 = torch.aten.transpose.int %32, %int-2_934, %int-1_935 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_936 = torch.constant.int 3200 | |
%1017 = torch.prim.ListConstruct %905, %int3200_936 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1018 = torch.aten.view %1015, %1017 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1018, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1019 = torch.aten.mm %1018, %1016 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1019, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_937 = torch.constant.int 1 | |
%int3200_938 = torch.constant.int 3200 | |
%1020 = torch.prim.ListConstruct %int1_937, %905, %int3200_938 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1021 = torch.aten.view %1019, %1020 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1021, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_939 = torch.constant.int 1 | |
%1022 = torch.aten.add.Tensor %857, %1021, %int1_939 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1022, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int6_940 = torch.constant.int 6 | |
%1023 = torch.prims.convert_element_type %1022, %int6_940 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1023, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_941 = torch.constant.int 2 | |
%1024 = torch.aten.pow.Tensor_Scalar %1023, %int2_941 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1024, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_942 = torch.constant.int -1 | |
%1025 = torch.prim.ListConstruct %int-1_942 : (!torch.int) -> !torch.list<int> | |
%true_943 = torch.constant.bool true | |
%none_944 = torch.constant.none | |
%1026 = torch.aten.mean.dim %1024, %1025, %true_943, %none_944 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1026, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_945 = torch.constant.float 9.9999999747524271E-7 | |
%int1_946 = torch.constant.int 1 | |
%1027 = torch.aten.add.Scalar %1026, %float9.999990e-07_945, %int1_946 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1027, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1028 = torch.aten.rsqrt %1027 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1028, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1029 = torch.aten.mul.Tensor %1023, %1028 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1029, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1030 = torch.aten.mul.Tensor %33, %1029 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1030, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_947 = torch.constant.int 5 | |
%1031 = torch.prims.convert_element_type %1030, %int5_947 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1031, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_948 = torch.constant.int -2 | |
%int-1_949 = torch.constant.int -1 | |
%1032 = torch.aten.transpose.int %34, %int-2_948, %int-1_949 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_950 = torch.constant.int 3200 | |
%1033 = torch.prim.ListConstruct %240, %int3200_950 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1034 = torch.aten.view %1031, %1033 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1034, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1035 = torch.aten.mm %1034, %1032 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1035, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_951 = torch.constant.int 1 | |
%int8640_952 = torch.constant.int 8640 | |
%1036 = torch.prim.ListConstruct %int1_951, %240, %int8640_952 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1037 = torch.aten.view %1035, %1036 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1037, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1038 = torch.aten.silu %1037 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1038, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_953 = torch.constant.int -2 | |
%int-1_954 = torch.constant.int -1 | |
%1039 = torch.aten.transpose.int %35, %int-2_953, %int-1_954 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_955 = torch.constant.int 3200 | |
%1040 = torch.prim.ListConstruct %240, %int3200_955 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1041 = torch.aten.view %1031, %1040 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1041, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1042 = torch.aten.mm %1041, %1039 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1042, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_956 = torch.constant.int 1 | |
%int8640_957 = torch.constant.int 8640 | |
%1043 = torch.prim.ListConstruct %int1_956, %240, %int8640_957 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1044 = torch.aten.view %1042, %1043 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1044, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1045 = torch.aten.mul.Tensor %1038, %1044 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1045, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
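// FFN second half: (silu(gate) * up) through the 8640 -> 3200 down-projection,
// then a residual add back onto the hidden stream.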
%int-2_958 = torch.constant.int -2 | |
%int-1_959 = torch.constant.int -1 | |
%1046 = torch.aten.transpose.int %36, %int-2_958, %int-1_959 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_960 = torch.constant.int 1 | |
%1047 = torch.aten.size.int %1037, %int1_960 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_961 = torch.constant.int 8640 | |
%1048 = torch.prim.ListConstruct %1047, %int8640_961 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1049 = torch.aten.view %1045, %1048 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1049, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%1050 = torch.aten.mm %1049, %1046 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1050, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_962 = torch.constant.int 1 | |
%int3200_963 = torch.constant.int 3200 | |
%1051 = torch.prim.ListConstruct %int1_962, %1047, %int3200_963 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1052 = torch.aten.view %1050, %1051 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1052, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_964 = torch.constant.int 1 | |
%1053 = torch.aten.add.Tensor %1022, %1052, %int1_964 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1053, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
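// Likely the pre-attention RMSNorm of the next sub-block (same f32 square/mean/rsqrt pattern as above).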
%int6_965 = torch.constant.int 6 | |
%1054 = torch.prims.convert_element_type %1053, %int6_965 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1054, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_966 = torch.constant.int 2 | |
%1055 = torch.aten.pow.Tensor_Scalar %1054, %int2_966 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1055, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_967 = torch.constant.int -1 | |
%1056 = torch.prim.ListConstruct %int-1_967 : (!torch.int) -> !torch.list<int> | |
%true_968 = torch.constant.bool true | |
%none_969 = torch.constant.none | |
%1057 = torch.aten.mean.dim %1055, %1056, %true_968, %none_969 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1057, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_970 = torch.constant.float 9.9999999747524271E-7 | |
%int1_971 = torch.constant.int 1 | |
%1058 = torch.aten.add.Scalar %1057, %float9.999990e-07_970, %int1_971 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1058, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1059 = torch.aten.rsqrt %1058 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1059, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1060 = torch.aten.mul.Tensor %1054, %1059 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1060, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1061 = torch.aten.mul.Tensor %37, %1060 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1061, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_972 = torch.constant.int 5 | |
%1062 = torch.prims.convert_element_type %1061, %int5_972 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1062, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
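// Q/K/V projections: three 3200x3200 matmuls against transposed weights,
// each applied to the same flattened [seq, 3200] activations.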
%int-2_973 = torch.constant.int -2 | |
%int-1_974 = torch.constant.int -1 | |
%1063 = torch.aten.transpose.int %38, %int-2_973, %int-1_974 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_975 = torch.constant.int 3200 | |
%1064 = torch.prim.ListConstruct %240, %int3200_975 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1065 = torch.aten.view %1062, %1064 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1065, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1066 = torch.aten.mm %1065, %1063 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1066, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_976 = torch.constant.int 1 | |
%int3200_977 = torch.constant.int 3200 | |
%1067 = torch.prim.ListConstruct %int1_976, %240, %int3200_977 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1068 = torch.aten.view %1066, %1067 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1068, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_978 = torch.constant.int -2 | |
%int-1_979 = torch.constant.int -1 | |
%1069 = torch.aten.transpose.int %39, %int-2_978, %int-1_979 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_980 = torch.constant.int 3200 | |
%1070 = torch.prim.ListConstruct %240, %int3200_980 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1071 = torch.aten.view %1062, %1070 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1071, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1072 = torch.aten.mm %1071, %1069 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1072, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_981 = torch.constant.int 1 | |
%int3200_982 = torch.constant.int 3200 | |
%1073 = torch.prim.ListConstruct %int1_981, %240, %int3200_982 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1074 = torch.aten.view %1072, %1073 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1074, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_983 = torch.constant.int -2 | |
%int-1_984 = torch.constant.int -1 | |
%1075 = torch.aten.transpose.int %40, %int-2_983, %int-1_984 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_985 = torch.constant.int 3200 | |
%1076 = torch.prim.ListConstruct %240, %int3200_985 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1077 = torch.aten.view %1062, %1076 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1077, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1078 = torch.aten.mm %1077, %1075 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1078, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_986 = torch.constant.int 1 | |
%int3200_987 = torch.constant.int 3200 | |
%1079 = torch.prim.ListConstruct %int1_986, %240, %int3200_987 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1080 = torch.aten.view %1078, %1079 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1080, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
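// Split heads: reshape each projection from [1, seq, 3200] to [1, seq, 32, 100]
// (32 heads x 100 head dim).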
%int1_988 = torch.constant.int 1 | |
%int32_989 = torch.constant.int 32 | |
%int100_990 = torch.constant.int 100 | |
%1081 = torch.prim.ListConstruct %int1_988, %240, %int32_989, %int100_990 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1082 = torch.aten.view %1068, %1081 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1082, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_991 = torch.constant.int 1 | |
%int32_992 = torch.constant.int 32 | |
%int100_993 = torch.constant.int 100 | |
%1083 = torch.prim.ListConstruct %int1_991, %240, %int32_992, %int100_993 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1084 = torch.aten.view %1074, %1083 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1084, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_994 = torch.constant.int 1 | |
%int32_995 = torch.constant.int 32 | |
%int100_996 = torch.constant.int 100 | |
%1085 = torch.prim.ListConstruct %int1_994, %240, %int32_995, %int100_996 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1086 = torch.aten.view %1080, %1085 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1086, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
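// Rotary-embedding table: positions 0..2047 times inverse frequencies 10000^(-2i/100),
// materialized as complex cos/sin pairs of shape [2048, 50].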
%int2048_997 = torch.constant.int 2048 | |
%none_998 = torch.constant.none | |
%none_999 = torch.constant.none | |
%cpu_1000 = torch.constant.device "cpu" | |
%false_1001 = torch.constant.bool false | |
%1087 = torch.aten.arange %int2048_997, %none_998, %none_999, %cpu_1000, %false_1001 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1002 = torch.constant.int 0 | |
%int100_1003 = torch.constant.int 100 | |
%int2_1004 = torch.constant.int 2 | |
%none_1005 = torch.constant.none | |
%none_1006 = torch.constant.none | |
%cpu_1007 = torch.constant.device "cpu" | |
%false_1008 = torch.constant.bool false | |
%1088 = torch.aten.arange.start_step %int0_1002, %int100_1003, %int2_1004, %none_1005, %none_1006, %cpu_1007, %false_1008 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1009 = torch.constant.int 0 | |
%int0_1010 = torch.constant.int 0 | |
%int50_1011 = torch.constant.int 50 | |
%int1_1012 = torch.constant.int 1 | |
%1089 = torch.aten.slice.Tensor %1088, %int0_1009, %int0_1010, %int50_1011, %int1_1012 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1013 = torch.constant.int 6 | |
%1090 = torch.prims.convert_element_type %1089, %int6_1013 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1014 = torch.constant.int 100 | |
%1091 = torch.aten.div.Scalar %1090, %int100_1014 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1015 = torch.constant.float 1.000000e+04 | |
%1092 = torch.aten.pow.Scalar %float1.000000e04_1015, %1091 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1093 = torch.aten.reciprocal %1092 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1016 = torch.constant.float 1.000000e+00 | |
%1094 = torch.aten.mul.Scalar %1093, %float1.000000e00_1016 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1017 = torch.constant.int 2048 | |
%int1_1018 = torch.constant.int 1 | |
%1095 = torch.prim.ListConstruct %int2048_1017, %int1_1018 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1096 = torch.aten.view %1087, %1095 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1097 = torch.aten.mul.Tensor %1096, %1094 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1098 = torch.aten.cos %1097 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1099 = torch.aten.sin %1097 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1100 = torch.aten.complex %1098, %1099 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1019 = torch.constant.int 1 | |
%1101 = torch.aten.size.int %1068, %int1_1019 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1020 = torch.constant.int 0 | |
%1102 = torch.aten.add.int %int0_1020, %1101 : !torch.int, !torch.int -> !torch.int | |
%int0_1021 = torch.constant.int 0 | |
%int0_1022 = torch.constant.int 0 | |
%int1_1023 = torch.constant.int 1 | |
%1103 = torch.aten.slice.Tensor %1100, %int0_1021, %int0_1022, %1102, %int1_1023 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1103, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1024 = torch.constant.int 1 | |
%int0_1025 = torch.constant.int 0 | |
%int9223372036854775807_1026 = torch.constant.int 9223372036854775807 | |
%int1_1027 = torch.constant.int 1 | |
%1104 = torch.aten.slice.Tensor %1103, %int1_1024, %int0_1025, %int9223372036854775807_1026, %int1_1027 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1104, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1028 = torch.constant.int 0 | |
%1105 = torch.aten.unsqueeze %1104, %int0_1028 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1105, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1029 = torch.constant.int 2 | |
%1106 = torch.aten.unsqueeze %1105, %int2_1029 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1106, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1030 = torch.constant.int 3 | |
%int0_1031 = torch.constant.int 0 | |
%int9223372036854775807_1032 = torch.constant.int 9223372036854775807 | |
%int1_1033 = torch.constant.int 1 | |
%1107 = torch.aten.slice.Tensor %1106, %int3_1030, %int0_1031, %int9223372036854775807_1032, %int1_1033 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1107, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
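// Apply RoPE (here apparently to Q): bitcast adjacent f16 pairs to complex<f16>,
// multiply by the rotations sliced to the current sequence length, bitcast back to real.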
%1108 = torch_c.to_builtin_tensor %1082 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_1034 = arith.constant 1 : index | |
%dim_1035 = tensor.dim %1108, %c1_1034 : tensor<1x?x32x100xf16> | |
%1109 = flow.tensor.bitcast %1108 : tensor<1x?x32x100xf16>{%dim_1035} -> tensor<1x?x32x50xcomplex<f16>>{%dim_1035} | |
%1110 = torch_c.from_builtin_tensor %1109 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1110, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1111 = torch.aten.mul.Tensor %1110, %1107 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1111, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1112 = torch_c.to_builtin_tensor %1111 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_1036 = arith.constant 1 : index | |
%dim_1037 = tensor.dim %1112, %c1_1036 : tensor<1x?x32x50xcomplex<f32>> | |
%1113 = flow.tensor.bitcast %1112 : tensor<1x?x32x50xcomplex<f32>>{%dim_1037} -> tensor<1x?x32x100xf32>{%dim_1037} | |
%1114 = torch_c.from_builtin_tensor %1113 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1114, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_1038 = torch.constant.int 5 | |
%1115 = torch.prims.convert_element_type %1114, %int5_1038 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1115, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
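// The same RoPE table is rebuilt from scratch for the second operand (the exported IR
// appears not to reuse the earlier computation) and applied identically.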
%int2048_1039 = torch.constant.int 2048 | |
%none_1040 = torch.constant.none | |
%none_1041 = torch.constant.none | |
%cpu_1042 = torch.constant.device "cpu" | |
%false_1043 = torch.constant.bool false | |
%1116 = torch.aten.arange %int2048_1039, %none_1040, %none_1041, %cpu_1042, %false_1043 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1044 = torch.constant.int 0 | |
%int100_1045 = torch.constant.int 100 | |
%int2_1046 = torch.constant.int 2 | |
%none_1047 = torch.constant.none | |
%none_1048 = torch.constant.none | |
%cpu_1049 = torch.constant.device "cpu" | |
%false_1050 = torch.constant.bool false | |
%1117 = torch.aten.arange.start_step %int0_1044, %int100_1045, %int2_1046, %none_1047, %none_1048, %cpu_1049, %false_1050 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1051 = torch.constant.int 0 | |
%int0_1052 = torch.constant.int 0 | |
%int50_1053 = torch.constant.int 50 | |
%int1_1054 = torch.constant.int 1 | |
%1118 = torch.aten.slice.Tensor %1117, %int0_1051, %int0_1052, %int50_1053, %int1_1054 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1055 = torch.constant.int 6 | |
%1119 = torch.prims.convert_element_type %1118, %int6_1055 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1056 = torch.constant.int 100 | |
%1120 = torch.aten.div.Scalar %1119, %int100_1056 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1057 = torch.constant.float 1.000000e+04 | |
%1121 = torch.aten.pow.Scalar %float1.000000e04_1057, %1120 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1122 = torch.aten.reciprocal %1121 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1058 = torch.constant.float 1.000000e+00 | |
%1123 = torch.aten.mul.Scalar %1122, %float1.000000e00_1058 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1059 = torch.constant.int 2048 | |
%int1_1060 = torch.constant.int 1 | |
%1124 = torch.prim.ListConstruct %int2048_1059, %int1_1060 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1125 = torch.aten.view %1116, %1124 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1126 = torch.aten.mul.Tensor %1125, %1123 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1127 = torch.aten.cos %1126 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1128 = torch.aten.sin %1126 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1129 = torch.aten.complex %1127, %1128 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1061 = torch.constant.int 1 | |
%1130 = torch.aten.size.int %1074, %int1_1061 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1062 = torch.constant.int 0 | |
%1131 = torch.aten.add.int %int0_1062, %1130 : !torch.int, !torch.int -> !torch.int | |
%int0_1063 = torch.constant.int 0 | |
%int0_1064 = torch.constant.int 0 | |
%int1_1065 = torch.constant.int 1 | |
%1132 = torch.aten.slice.Tensor %1129, %int0_1063, %int0_1064, %1131, %int1_1065 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1132, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1066 = torch.constant.int 1 | |
%int0_1067 = torch.constant.int 0 | |
%int9223372036854775807_1068 = torch.constant.int 9223372036854775807 | |
%int1_1069 = torch.constant.int 1 | |
%1133 = torch.aten.slice.Tensor %1132, %int1_1066, %int0_1067, %int9223372036854775807_1068, %int1_1069 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1133, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1070 = torch.constant.int 0 | |
%1134 = torch.aten.unsqueeze %1133, %int0_1070 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1134, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1071 = torch.constant.int 2 | |
%1135 = torch.aten.unsqueeze %1134, %int2_1071 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1135, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1072 = torch.constant.int 3 | |
%int0_1073 = torch.constant.int 0 | |
%int9223372036854775807_1074 = torch.constant.int 9223372036854775807 | |
%int1_1075 = torch.constant.int 1 | |
%1136 = torch.aten.slice.Tensor %1135, %int3_1072, %int0_1073, %int9223372036854775807_1074, %int1_1075 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1136, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%1137 = torch_c.to_builtin_tensor %1084 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_1076 = arith.constant 1 : index | |
%dim_1077 = tensor.dim %1137, %c1_1076 : tensor<1x?x32x100xf16> | |
%1138 = flow.tensor.bitcast %1137 : tensor<1x?x32x100xf16>{%dim_1077} -> tensor<1x?x32x50xcomplex<f16>>{%dim_1077} | |
%1139 = torch_c.from_builtin_tensor %1138 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1139, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1140 = torch.aten.mul.Tensor %1139, %1136 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1140, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1141 = torch_c.to_builtin_tensor %1140 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_1078 = arith.constant 1 : index | |
%dim_1079 = tensor.dim %1141, %c1_1078 : tensor<1x?x32x50xcomplex<f32>> | |
%1142 = flow.tensor.bitcast %1141 : tensor<1x?x32x50xcomplex<f32>>{%dim_1079} -> tensor<1x?x32x100xf32>{%dim_1079} | |
%1143 = torch_c.from_builtin_tensor %1142 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1143, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_1080 = torch.constant.int 5 | |
%1144 = torch.prims.convert_element_type %1143, %int5_1080 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1144, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
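// Paged KV-cache slot arithmetic: each page holds 26 layers x 2 (K/V) = 52 slots, so
// slot = page_id * 52 + offset; the +8 here (and +9 below) likely select this layer's K and V slots.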
%int52_1081 = torch.constant.int 52 | |
%1145 = torch.aten.mul.Scalar %arg2, %int52_1081 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1145, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int8 = torch.constant.int 8 | |
%int1_1082 = torch.constant.int 1 | |
%1146 = torch.aten.add.Scalar %1145, %int8, %int1_1082 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1146, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
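// Cache write: K and V are tiled into 16-token pages of shape [pages, 16, 32, 100],
// concatenated along with their slot indices, index_put into the flattened cache, and the
// cache is viewed back to [?, 2662400] (2662400 = 26 * 2 * 16 * 32 * 100).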
%int1_1083 = torch.constant.int 1 | |
%int16_1084 = torch.constant.int 16 | |
%int32_1085 = torch.constant.int 32 | |
%int100_1086 = torch.constant.int 100 | |
%1147 = torch.prim.ListConstruct %int1_1083, %368, %int16_1084, %int32_1085, %int100_1086 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1148 = torch.aten.view %1144, %1147 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1148, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_1087 = torch.constant.int 16 | |
%int32_1088 = torch.constant.int 32 | |
%int100_1089 = torch.constant.int 100 | |
%1149 = torch.prim.ListConstruct %368, %int16_1087, %int32_1088, %int100_1089 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1150 = torch.aten.view %1148, %1149 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1150, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1151 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1152 = torch.aten.view %1146, %1151 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1152, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_1090 = torch.constant.int 1 | |
%int16_1091 = torch.constant.int 16 | |
%int32_1092 = torch.constant.int 32 | |
%int100_1093 = torch.constant.int 100 | |
%1153 = torch.prim.ListConstruct %int1_1090, %368, %int16_1091, %int32_1092, %int100_1093 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1154 = torch.aten.view %1086, %1153 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1154, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_1094 = torch.constant.int 16 | |
%int32_1095 = torch.constant.int 32 | |
%int100_1096 = torch.constant.int 100 | |
%1155 = torch.prim.ListConstruct %368, %int16_1094, %int32_1095, %int100_1096 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1156 = torch.aten.view %1154, %1155 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1156, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_1097 = torch.constant.int 1 | |
%int1_1098 = torch.constant.int 1 | |
%1157 = torch.aten.add.Scalar %1146, %int1_1097, %int1_1098 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1157, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%1158 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1159 = torch.aten.view %1157, %1158 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1159, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%1160 = torch.prim.ListConstruct %1152, %1159 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_1099 = torch.constant.int 0 | |
%1161 = torch.aten.cat %1160, %int0_1099 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1161, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%1162 = torch.prim.ListConstruct %1150, %1156 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_1100 = torch.constant.int 0 | |
%1163 = torch.aten.cat %1162, %int0_1100 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1163, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_1101 = torch.constant.int 26 | |
%int2_1102 = torch.constant.int 2 | |
%int16_1103 = torch.constant.int 16 | |
%int32_1104 = torch.constant.int 32 | |
%int100_1105 = torch.constant.int 100 | |
%1164 = torch.prim.ListConstruct %359, %int26_1101, %int2_1102, %int16_1103, %int32_1104, %int100_1105 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1165 = torch.aten.view %979, %1164 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1165, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_1106 = torch.constant.int 26 | |
%1166 = torch.aten.mul.int %359, %int26_1106 : !torch.int, !torch.int -> !torch.int | |
%int2_1107 = torch.constant.int 2 | |
%1167 = torch.aten.mul.int %1166, %int2_1107 : !torch.int, !torch.int -> !torch.int | |
%int16_1108 = torch.constant.int 16 | |
%int32_1109 = torch.constant.int 32 | |
%int100_1110 = torch.constant.int 100 | |
%1168 = torch.prim.ListConstruct %1167, %int16_1108, %int32_1109, %int100_1110 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1169 = torch.aten.view %1165, %1168 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1169, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1170 = torch.prim.ListConstruct %1161 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_1111 = torch.constant.bool false | |
%1171 = torch.aten.index_put %1169, %1170, %1163, %false_1111 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1171, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_1112 = torch.constant.int 26 | |
%int2_1113 = torch.constant.int 2 | |
%int16_1114 = torch.constant.int 16 | |
%int32_1115 = torch.constant.int 32 | |
%int100_1116 = torch.constant.int 100 | |
%1172 = torch.prim.ListConstruct %359, %int26_1112, %int2_1113, %int16_1114, %int32_1115, %int100_1116 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1173 = torch.aten.view %1171, %1172 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1173, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_1117 = torch.constant.int 2662400 | |
%1174 = torch.prim.ListConstruct %359, %int2662400_1117 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1175 = torch.aten.view %1173, %1174 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %1175, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
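// Attention scores: transpose to [1, 32, seq, 100] and batch-matmul Q against K^T.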
%int1_1118 = torch.constant.int 1 | |
%int2_1119 = torch.constant.int 2 | |
%1176 = torch.aten.transpose.int %1115, %int1_1118, %int2_1119 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1176, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1120 = torch.constant.int 1 | |
%int2_1121 = torch.constant.int 2 | |
%1177 = torch.aten.transpose.int %1144, %int1_1120, %int2_1121 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1177, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1122 = torch.constant.int 1 | |
%int2_1123 = torch.constant.int 2 | |
%1178 = torch.aten.transpose.int %1086, %int1_1122, %int2_1123 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1178, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_1124 = torch.constant.int 2 | |
%int3_1125 = torch.constant.int 3 | |
%1179 = torch.aten.transpose.int %1177, %int2_1124, %int3_1125 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1179, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_1126 = torch.constant.int 1 | |
%int32_1127 = torch.constant.int 32 | |
%int100_1128 = torch.constant.int 100 | |
%1180 = torch.prim.ListConstruct %int1_1126, %int32_1127, %1101, %int100_1128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1129 = torch.constant.bool false | |
%1181 = torch.aten.expand %1176, %1180, %false_1129 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1181, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_1130 = torch.constant.int 32 | |
%int100_1131 = torch.constant.int 100 | |
%1182 = torch.prim.ListConstruct %int32_1130, %1101, %int100_1131 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1183 = torch.aten.view %1181, %1182 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1183, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_1132 = torch.constant.int 1 | |
%int32_1133 = torch.constant.int 32 | |
%int100_1134 = torch.constant.int 100 | |
%1184 = torch.prim.ListConstruct %int1_1132, %int32_1133, %int100_1134, %1130 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1135 = torch.constant.bool false | |
%1185 = torch.aten.expand %1179, %1184, %false_1135 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1185, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_1136 = torch.constant.int 32 | |
%int100_1137 = torch.constant.int 100 | |
%1186 = torch.prim.ListConstruct %int32_1136, %int100_1137, %1130 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1187 = torch.aten.view %1185, %1186 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %1187, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%1188 = torch.aten.bmm %1183, %1187 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1188, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_1138 = torch.constant.int 1 | |
%int32_1139 = torch.constant.int 32 | |
%1189 = torch.prim.ListConstruct %int1_1138, %int32_1139, %1101, %1130 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1190 = torch.aten.view %1188, %1189 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1190, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
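// Divide by 10.0 (= sqrt of head dim 100), add the attention mask (%266),
// softmax in f32 for stability, then cast the probabilities back to f16.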
%float1.000000e01_1140 = torch.constant.float 1.000000e+01 | |
%1191 = torch.aten.div.Scalar %1190, %float1.000000e01_1140 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1191, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_1141 = torch.constant.int 1 | |
%1192 = torch.aten.add.Tensor %1191, %266, %int1_1141 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1192, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_1142 = torch.constant.int 6 | |
%1193 = torch.prims.convert_element_type %1192, %int6_1142 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1193, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_1143 = torch.constant.int -1 | |
%false_1144 = torch.constant.bool false | |
%1194 = torch.aten._softmax %1193, %int-1_1143, %false_1144 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1194, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_1145 = torch.constant.int 5 | |
%1195 = torch.prims.convert_element_type %1194, %int5_1145 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1195, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
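// Context: probabilities @ V via batched matmul, heads transposed back and
// merged to [1, seq, 3200].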
%int1_1146 = torch.constant.int 1 | |
%int32_1147 = torch.constant.int 32 | |
%1196 = torch.prim.ListConstruct %int1_1146, %int32_1147, %1101, %1130 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1148 = torch.constant.bool false | |
%1197 = torch.aten.expand %1195, %1196, %false_1148 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1197, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_1149 = torch.constant.int 32 | |
%1198 = torch.prim.ListConstruct %int32_1149, %1101, %1130 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1199 = torch.aten.view %1197, %1198 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1199, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_1150 = torch.constant.int 1 | |
%1200 = torch.aten.size.int %1080, %int1_1150 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_1151 = torch.constant.int 1 | |
%int32_1152 = torch.constant.int 32 | |
%int100_1153 = torch.constant.int 100 | |
%1201 = torch.prim.ListConstruct %int1_1151, %int32_1152, %1200, %int100_1153 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1154 = torch.constant.bool false | |
%1202 = torch.aten.expand %1178, %1201, %false_1154 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1202, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_1155 = torch.constant.int 32 | |
%int100_1156 = torch.constant.int 100 | |
%1203 = torch.prim.ListConstruct %int32_1155, %1200, %int100_1156 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1204 = torch.aten.view %1202, %1203 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1204, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%1205 = torch.aten.bmm %1199, %1204 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1205, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_1157 = torch.constant.int 1 | |
%int32_1158 = torch.constant.int 32 | |
%int100_1159 = torch.constant.int 100 | |
%1206 = torch.prim.ListConstruct %int1_1157, %int32_1158, %1101, %int100_1159 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1207 = torch.aten.view %1205, %1206 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1207, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1160 = torch.constant.int 1 | |
%int2_1161 = torch.constant.int 2 | |
%1208 = torch.aten.transpose.int %1207, %int1_1160, %int2_1161 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1208, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_1162 = torch.constant.int 0 | |
%1209 = torch.aten.clone %1208, %int0_1162 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1209, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1163 = torch.constant.int 1 | |
%int3200_1164 = torch.constant.int 3200 | |
%1210 = torch.prim.ListConstruct %int1_1163, %1101, %int3200_1164 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1211 = torch.aten._unsafe_view %1209, %1210 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1211, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
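// Attention output projection (3200x3200 matmul) followed by the residual add.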
%int-2_1165 = torch.constant.int -2 | |
%int-1_1166 = torch.constant.int -1 | |
%1212 = torch.aten.transpose.int %41, %int-2_1165, %int-1_1166 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1167 = torch.constant.int 3200 | |
%1213 = torch.prim.ListConstruct %1101, %int3200_1167 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1214 = torch.aten.view %1211, %1213 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1214, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1215 = torch.aten.mm %1214, %1212 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1215, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1168 = torch.constant.int 1 | |
%int3200_1169 = torch.constant.int 3200 | |
%1216 = torch.prim.ListConstruct %int1_1168, %1101, %int3200_1169 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1217 = torch.aten.view %1215, %1216 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1217, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1170 = torch.constant.int 1 | |
%1218 = torch.aten.add.Tensor %1053, %1217, %int1_1170 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1218, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
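// Post-attention RMSNorm, presumably feeding this block's feed-forward network.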
%int6_1171 = torch.constant.int 6 | |
%1219 = torch.prims.convert_element_type %1218, %int6_1171 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1219, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_1172 = torch.constant.int 2 | |
%1220 = torch.aten.pow.Tensor_Scalar %1219, %int2_1172 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1220, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_1173 = torch.constant.int -1 | |
%1221 = torch.prim.ListConstruct %int-1_1173 : (!torch.int) -> !torch.list<int> | |
%true_1174 = torch.constant.bool true | |
%none_1175 = torch.constant.none | |
%1222 = torch.aten.mean.dim %1220, %1221, %true_1174, %none_1175 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1222, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_1176 = torch.constant.float 9.9999999747524271E-7 | |
%int1_1177 = torch.constant.int 1 | |
%1223 = torch.aten.add.Scalar %1222, %float9.999990e-07_1176, %int1_1177 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1223, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1224 = torch.aten.rsqrt %1223 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1224, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1225 = torch.aten.mul.Tensor %1219, %1224 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1225, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1226 = torch.aten.mul.Tensor %42, %1225 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1226, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_1178 = torch.constant.int 5 | |
%1227 = torch.prims.convert_element_type %1226, %int5_1178 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1227, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
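// SwiGLU FFN again: silu(gate) * up, 8640 -> 3200 down-projection, residual add.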
%int-2_1179 = torch.constant.int -2 | |
%int-1_1180 = torch.constant.int -1 | |
%1228 = torch.aten.transpose.int %43, %int-2_1179, %int-1_1180 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_1181 = torch.constant.int 3200 | |
%1229 = torch.prim.ListConstruct %240, %int3200_1181 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1230 = torch.aten.view %1227, %1229 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1230, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1231 = torch.aten.mm %1230, %1228 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1231, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_1182 = torch.constant.int 1 | |
%int8640_1183 = torch.constant.int 8640 | |
%1232 = torch.prim.ListConstruct %int1_1182, %240, %int8640_1183 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1233 = torch.aten.view %1231, %1232 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1233, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1234 = torch.aten.silu %1233 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1234, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_1184 = torch.constant.int -2 | |
%int-1_1185 = torch.constant.int -1 | |
%1235 = torch.aten.transpose.int %44, %int-2_1184, %int-1_1185 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_1186 = torch.constant.int 3200 | |
%1236 = torch.prim.ListConstruct %240, %int3200_1186 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1237 = torch.aten.view %1227, %1236 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1237, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1238 = torch.aten.mm %1237, %1235 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1238, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_1187 = torch.constant.int 1 | |
%int8640_1188 = torch.constant.int 8640 | |
%1239 = torch.prim.ListConstruct %int1_1187, %240, %int8640_1188 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1240 = torch.aten.view %1238, %1239 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1240, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1241 = torch.aten.mul.Tensor %1234, %1240 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1241, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_1189 = torch.constant.int -2 | |
%int-1_1190 = torch.constant.int -1 | |
%1242 = torch.aten.transpose.int %45, %int-2_1189, %int-1_1190 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_1191 = torch.constant.int 1 | |
%1243 = torch.aten.size.int %1233, %int1_1191 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_1192 = torch.constant.int 8640 | |
%1244 = torch.prim.ListConstruct %1243, %int8640_1192 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1245 = torch.aten.view %1241, %1244 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1245, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%1246 = torch.aten.mm %1245, %1242 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1246, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1193 = torch.constant.int 1 | |
%int3200_1194 = torch.constant.int 3200 | |
%1247 = torch.prim.ListConstruct %int1_1193, %1243, %int3200_1194 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1248 = torch.aten.view %1246, %1247 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1248, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1195 = torch.constant.int 1 | |
%1249 = torch.aten.add.Tensor %1218, %1248, %int1_1195 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1249, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
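// The per-layer pattern repeats for the next block: RMSNorm, then Q/K/V projections.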
%int6_1196 = torch.constant.int 6 | |
%1250 = torch.prims.convert_element_type %1249, %int6_1196 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1250, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_1197 = torch.constant.int 2 | |
%1251 = torch.aten.pow.Tensor_Scalar %1250, %int2_1197 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1251, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_1198 = torch.constant.int -1 | |
%1252 = torch.prim.ListConstruct %int-1_1198 : (!torch.int) -> !torch.list<int> | |
%true_1199 = torch.constant.bool true | |
%none_1200 = torch.constant.none | |
%1253 = torch.aten.mean.dim %1251, %1252, %true_1199, %none_1200 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1253, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_1201 = torch.constant.float 9.9999999747524271E-7 | |
%int1_1202 = torch.constant.int 1 | |
%1254 = torch.aten.add.Scalar %1253, %float9.999990e-07_1201, %int1_1202 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1254, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1255 = torch.aten.rsqrt %1254 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1255, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1256 = torch.aten.mul.Tensor %1250, %1255 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1256, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1257 = torch.aten.mul.Tensor %46, %1256 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1257, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_1203 = torch.constant.int 5 | |
%1258 = torch.prims.convert_element_type %1257, %int5_1203 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1258, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1204 = torch.constant.int -2 | |
%int-1_1205 = torch.constant.int -1 | |
%1259 = torch.aten.transpose.int %47, %int-2_1204, %int-1_1205 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1206 = torch.constant.int 3200 | |
%1260 = torch.prim.ListConstruct %240, %int3200_1206 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1261 = torch.aten.view %1258, %1260 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1261, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1262 = torch.aten.mm %1261, %1259 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1262, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1207 = torch.constant.int 1 | |
%int3200_1208 = torch.constant.int 3200 | |
%1263 = torch.prim.ListConstruct %int1_1207, %240, %int3200_1208 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1264 = torch.aten.view %1262, %1263 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1264, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1209 = torch.constant.int -2 | |
%int-1_1210 = torch.constant.int -1 | |
%1265 = torch.aten.transpose.int %48, %int-2_1209, %int-1_1210 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1211 = torch.constant.int 3200 | |
%1266 = torch.prim.ListConstruct %240, %int3200_1211 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1267 = torch.aten.view %1258, %1266 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1267, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1268 = torch.aten.mm %1267, %1265 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1268, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1212 = torch.constant.int 1 | |
%int3200_1213 = torch.constant.int 3200 | |
%1269 = torch.prim.ListConstruct %int1_1212, %240, %int3200_1213 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1270 = torch.aten.view %1268, %1269 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1270, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1214 = torch.constant.int -2 | |
%int-1_1215 = torch.constant.int -1 | |
%1271 = torch.aten.transpose.int %49, %int-2_1214, %int-1_1215 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1216 = torch.constant.int 3200 | |
%1272 = torch.prim.ListConstruct %240, %int3200_1216 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1273 = torch.aten.view %1258, %1272 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1273, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1274 = torch.aten.mm %1273, %1271 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1274, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1217 = torch.constant.int 1 | |
%int3200_1218 = torch.constant.int 3200 | |
%1275 = torch.prim.ListConstruct %int1_1217, %240, %int3200_1218 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1276 = torch.aten.view %1274, %1275 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1276, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
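// Split the 3200-wide projections into 32 attention heads of dimension 100:
// [1, seq, 3200] -> [1, seq, 32, 100] for Q (%1278), K (%1280), and V (%1282).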
%int1_1219 = torch.constant.int 1 | |
%int32_1220 = torch.constant.int 32 | |
%int100_1221 = torch.constant.int 100 | |
%1277 = torch.prim.ListConstruct %int1_1219, %240, %int32_1220, %int100_1221 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1278 = torch.aten.view %1264, %1277 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1278, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1222 = torch.constant.int 1 | |
%int32_1223 = torch.constant.int 32 | |
%int100_1224 = torch.constant.int 100 | |
%1279 = torch.prim.ListConstruct %int1_1222, %240, %int32_1223, %int100_1224 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1280 = torch.aten.view %1270, %1279 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1280, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1225 = torch.constant.int 1 | |
%int32_1226 = torch.constant.int 32 | |
%int100_1227 = torch.constant.int 100 | |
%1281 = torch.prim.ListConstruct %int1_1225, %240, %int32_1226, %int100_1227 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1282 = torch.aten.view %1276, %1281 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1282, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
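// Rotary position embedding (RoPE) for Q: build inverse frequencies
// 1/10000^(2i/100) for i in [0,50), take the outer product with positions
// [0,2048), form cos + i*sin as a [2048,50] complex table, slice it to the
// current sequence length, then bitcast Q's 100-dim heads to 50 complex pairs,
// multiply by the table, and bitcast back (rotated Q is %1311, f16).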
%int2048_1228 = torch.constant.int 2048 | |
%none_1229 = torch.constant.none | |
%none_1230 = torch.constant.none | |
%cpu_1231 = torch.constant.device "cpu" | |
%false_1232 = torch.constant.bool false | |
%1283 = torch.aten.arange %int2048_1228, %none_1229, %none_1230, %cpu_1231, %false_1232 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1233 = torch.constant.int 0 | |
%int100_1234 = torch.constant.int 100 | |
%int2_1235 = torch.constant.int 2 | |
%none_1236 = torch.constant.none | |
%none_1237 = torch.constant.none | |
%cpu_1238 = torch.constant.device "cpu" | |
%false_1239 = torch.constant.bool false | |
%1284 = torch.aten.arange.start_step %int0_1233, %int100_1234, %int2_1235, %none_1236, %none_1237, %cpu_1238, %false_1239 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1240 = torch.constant.int 0 | |
%int0_1241 = torch.constant.int 0 | |
%int50_1242 = torch.constant.int 50 | |
%int1_1243 = torch.constant.int 1 | |
%1285 = torch.aten.slice.Tensor %1284, %int0_1240, %int0_1241, %int50_1242, %int1_1243 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1244 = torch.constant.int 6 | |
%1286 = torch.prims.convert_element_type %1285, %int6_1244 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1245 = torch.constant.int 100 | |
%1287 = torch.aten.div.Scalar %1286, %int100_1245 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1246 = torch.constant.float 1.000000e+04 | |
%1288 = torch.aten.pow.Scalar %float1.000000e04_1246, %1287 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1289 = torch.aten.reciprocal %1288 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1247 = torch.constant.float 1.000000e+00 | |
%1290 = torch.aten.mul.Scalar %1289, %float1.000000e00_1247 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1248 = torch.constant.int 2048 | |
%int1_1249 = torch.constant.int 1 | |
%1291 = torch.prim.ListConstruct %int2048_1248, %int1_1249 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1292 = torch.aten.view %1283, %1291 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1293 = torch.aten.mul.Tensor %1292, %1290 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1294 = torch.aten.cos %1293 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1295 = torch.aten.sin %1293 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1296 = torch.aten.complex %1294, %1295 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1250 = torch.constant.int 1 | |
%1297 = torch.aten.size.int %1264, %int1_1250 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1251 = torch.constant.int 0 | |
%1298 = torch.aten.add.int %int0_1251, %1297 : !torch.int, !torch.int -> !torch.int | |
%int0_1252 = torch.constant.int 0 | |
%int0_1253 = torch.constant.int 0 | |
%int1_1254 = torch.constant.int 1 | |
%1299 = torch.aten.slice.Tensor %1296, %int0_1252, %int0_1253, %1298, %int1_1254 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1299, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1255 = torch.constant.int 1 | |
%int0_1256 = torch.constant.int 0 | |
%int9223372036854775807_1257 = torch.constant.int 9223372036854775807 | |
%int1_1258 = torch.constant.int 1 | |
%1300 = torch.aten.slice.Tensor %1299, %int1_1255, %int0_1256, %int9223372036854775807_1257, %int1_1258 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1300, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1259 = torch.constant.int 0 | |
%1301 = torch.aten.unsqueeze %1300, %int0_1259 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1301, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1260 = torch.constant.int 2 | |
%1302 = torch.aten.unsqueeze %1301, %int2_1260 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1302, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1261 = torch.constant.int 3 | |
%int0_1262 = torch.constant.int 0 | |
%int9223372036854775807_1263 = torch.constant.int 9223372036854775807 | |
%int1_1264 = torch.constant.int 1 | |
%1303 = torch.aten.slice.Tensor %1302, %int3_1261, %int0_1262, %int9223372036854775807_1263, %int1_1264 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1303, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%1304 = torch_c.to_builtin_tensor %1278 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_1265 = arith.constant 1 : index | |
%dim_1266 = tensor.dim %1304, %c1_1265 : tensor<1x?x32x100xf16> | |
%1305 = flow.tensor.bitcast %1304 : tensor<1x?x32x100xf16>{%dim_1266} -> tensor<1x?x32x50xcomplex<f16>>{%dim_1266} | |
%1306 = torch_c.from_builtin_tensor %1305 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1306, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1307 = torch.aten.mul.Tensor %1306, %1303 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1307, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1308 = torch_c.to_builtin_tensor %1307 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_1267 = arith.constant 1 : index | |
%dim_1268 = tensor.dim %1308, %c1_1267 : tensor<1x?x32x50xcomplex<f32>> | |
%1309 = flow.tensor.bitcast %1308 : tensor<1x?x32x50xcomplex<f32>>{%dim_1268} -> tensor<1x?x32x100xf32>{%dim_1268} | |
%1310 = torch_c.from_builtin_tensor %1309 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1310, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_1269 = torch.constant.int 5 | |
%1311 = torch.prims.convert_element_type %1310, %int5_1269 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1311, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
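// The same RoPE computation is repeated verbatim for K (the frequency table is
// rebuilt rather than reused); the rotated K is %1340.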
%int2048_1270 = torch.constant.int 2048 | |
%none_1271 = torch.constant.none | |
%none_1272 = torch.constant.none | |
%cpu_1273 = torch.constant.device "cpu" | |
%false_1274 = torch.constant.bool false | |
%1312 = torch.aten.arange %int2048_1270, %none_1271, %none_1272, %cpu_1273, %false_1274 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1275 = torch.constant.int 0 | |
%int100_1276 = torch.constant.int 100 | |
%int2_1277 = torch.constant.int 2 | |
%none_1278 = torch.constant.none | |
%none_1279 = torch.constant.none | |
%cpu_1280 = torch.constant.device "cpu" | |
%false_1281 = torch.constant.bool false | |
%1313 = torch.aten.arange.start_step %int0_1275, %int100_1276, %int2_1277, %none_1278, %none_1279, %cpu_1280, %false_1281 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1282 = torch.constant.int 0 | |
%int0_1283 = torch.constant.int 0 | |
%int50_1284 = torch.constant.int 50 | |
%int1_1285 = torch.constant.int 1 | |
%1314 = torch.aten.slice.Tensor %1313, %int0_1282, %int0_1283, %int50_1284, %int1_1285 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1286 = torch.constant.int 6 | |
%1315 = torch.prims.convert_element_type %1314, %int6_1286 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1287 = torch.constant.int 100 | |
%1316 = torch.aten.div.Scalar %1315, %int100_1287 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1288 = torch.constant.float 1.000000e+04 | |
%1317 = torch.aten.pow.Scalar %float1.000000e04_1288, %1316 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1318 = torch.aten.reciprocal %1317 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1289 = torch.constant.float 1.000000e+00 | |
%1319 = torch.aten.mul.Scalar %1318, %float1.000000e00_1289 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1290 = torch.constant.int 2048 | |
%int1_1291 = torch.constant.int 1 | |
%1320 = torch.prim.ListConstruct %int2048_1290, %int1_1291 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1321 = torch.aten.view %1312, %1320 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1322 = torch.aten.mul.Tensor %1321, %1319 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1323 = torch.aten.cos %1322 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1324 = torch.aten.sin %1322 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1325 = torch.aten.complex %1323, %1324 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1292 = torch.constant.int 1 | |
%1326 = torch.aten.size.int %1270, %int1_1292 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1293 = torch.constant.int 0 | |
%1327 = torch.aten.add.int %int0_1293, %1326 : !torch.int, !torch.int -> !torch.int | |
%int0_1294 = torch.constant.int 0 | |
%int0_1295 = torch.constant.int 0 | |
%int1_1296 = torch.constant.int 1 | |
%1328 = torch.aten.slice.Tensor %1325, %int0_1294, %int0_1295, %1327, %int1_1296 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1328, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1297 = torch.constant.int 1 | |
%int0_1298 = torch.constant.int 0 | |
%int9223372036854775807_1299 = torch.constant.int 9223372036854775807 | |
%int1_1300 = torch.constant.int 1 | |
%1329 = torch.aten.slice.Tensor %1328, %int1_1297, %int0_1298, %int9223372036854775807_1299, %int1_1300 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1329, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1301 = torch.constant.int 0 | |
%1330 = torch.aten.unsqueeze %1329, %int0_1301 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1330, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1302 = torch.constant.int 2 | |
%1331 = torch.aten.unsqueeze %1330, %int2_1302 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1331, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1303 = torch.constant.int 3 | |
%int0_1304 = torch.constant.int 0 | |
%int9223372036854775807_1305 = torch.constant.int 9223372036854775807 | |
%int1_1306 = torch.constant.int 1 | |
%1332 = torch.aten.slice.Tensor %1331, %int3_1303, %int0_1304, %int9223372036854775807_1305, %int1_1306 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1332, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%1333 = torch_c.to_builtin_tensor %1280 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_1307 = arith.constant 1 : index | |
%dim_1308 = tensor.dim %1333, %c1_1307 : tensor<1x?x32x100xf16> | |
%1334 = flow.tensor.bitcast %1333 : tensor<1x?x32x100xf16>{%dim_1308} -> tensor<1x?x32x50xcomplex<f16>>{%dim_1308} | |
%1335 = torch_c.from_builtin_tensor %1334 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1335, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1336 = torch.aten.mul.Tensor %1335, %1332 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1336, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1337 = torch_c.to_builtin_tensor %1336 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_1309 = arith.constant 1 : index | |
%dim_1310 = tensor.dim %1337, %c1_1309 : tensor<1x?x32x50xcomplex<f32>> | |
%1338 = flow.tensor.bitcast %1337 : tensor<1x?x32x50xcomplex<f32>>{%dim_1310} -> tensor<1x?x32x100xf32>{%dim_1310} | |
%1339 = torch_c.from_builtin_tensor %1338 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1339, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_1311 = torch.constant.int 5 | |
%1340 = torch.prims.convert_element_type %1339, %int5_1311 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1340, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
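// Paged KV-cache write: %arg2 holds page indices; the stride 52 and offset 10
// suggest a cache laid out as [pages, 26 layers, 2 (K/V), 16 tokens/page,
// 32 heads, 100], with this layer writing K at slot 2*layer and V at
// slot 2*layer + 1. Rotated K (%1346) and V (%1352) are flattened and scattered
// into the flat cache view with torch.aten.index_put (%1367), and the cache is
// reshaped back to [?, 2662400] (%1371).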
%int52_1312 = torch.constant.int 52 | |
%1341 = torch.aten.mul.Scalar %arg2, %int52_1312 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1341, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int10 = torch.constant.int 10 | |
%int1_1313 = torch.constant.int 1 | |
%1342 = torch.aten.add.Scalar %1341, %int10, %int1_1313 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1342, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_1314 = torch.constant.int 1 | |
%int16_1315 = torch.constant.int 16 | |
%int32_1316 = torch.constant.int 32 | |
%int100_1317 = torch.constant.int 100 | |
%1343 = torch.prim.ListConstruct %int1_1314, %368, %int16_1315, %int32_1316, %int100_1317 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1344 = torch.aten.view %1340, %1343 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1344, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_1318 = torch.constant.int 16 | |
%int32_1319 = torch.constant.int 32 | |
%int100_1320 = torch.constant.int 100 | |
%1345 = torch.prim.ListConstruct %368, %int16_1318, %int32_1319, %int100_1320 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1346 = torch.aten.view %1344, %1345 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1346, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1347 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1348 = torch.aten.view %1342, %1347 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1348, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_1321 = torch.constant.int 1 | |
%int16_1322 = torch.constant.int 16 | |
%int32_1323 = torch.constant.int 32 | |
%int100_1324 = torch.constant.int 100 | |
%1349 = torch.prim.ListConstruct %int1_1321, %368, %int16_1322, %int32_1323, %int100_1324 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1350 = torch.aten.view %1282, %1349 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1350, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_1325 = torch.constant.int 16 | |
%int32_1326 = torch.constant.int 32 | |
%int100_1327 = torch.constant.int 100 | |
%1351 = torch.prim.ListConstruct %368, %int16_1325, %int32_1326, %int100_1327 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1352 = torch.aten.view %1350, %1351 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1352, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_1328 = torch.constant.int 1 | |
%int1_1329 = torch.constant.int 1 | |
%1353 = torch.aten.add.Scalar %1342, %int1_1328, %int1_1329 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1353, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%1354 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1355 = torch.aten.view %1353, %1354 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1355, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%1356 = torch.prim.ListConstruct %1348, %1355 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_1330 = torch.constant.int 0 | |
%1357 = torch.aten.cat %1356, %int0_1330 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1357, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%1358 = torch.prim.ListConstruct %1346, %1352 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_1331 = torch.constant.int 0 | |
%1359 = torch.aten.cat %1358, %int0_1331 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1359, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_1332 = torch.constant.int 26 | |
%int2_1333 = torch.constant.int 2 | |
%int16_1334 = torch.constant.int 16 | |
%int32_1335 = torch.constant.int 32 | |
%int100_1336 = torch.constant.int 100 | |
%1360 = torch.prim.ListConstruct %359, %int26_1332, %int2_1333, %int16_1334, %int32_1335, %int100_1336 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1361 = torch.aten.view %1175, %1360 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1361, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_1337 = torch.constant.int 26 | |
%1362 = torch.aten.mul.int %359, %int26_1337 : !torch.int, !torch.int -> !torch.int | |
%int2_1338 = torch.constant.int 2 | |
%1363 = torch.aten.mul.int %1362, %int2_1338 : !torch.int, !torch.int -> !torch.int | |
%int16_1339 = torch.constant.int 16 | |
%int32_1340 = torch.constant.int 32 | |
%int100_1341 = torch.constant.int 100 | |
%1364 = torch.prim.ListConstruct %1363, %int16_1339, %int32_1340, %int100_1341 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1365 = torch.aten.view %1361, %1364 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1365, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1366 = torch.prim.ListConstruct %1357 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_1342 = torch.constant.bool false | |
%1367 = torch.aten.index_put %1365, %1366, %1359, %false_1342 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1367, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_1343 = torch.constant.int 26 | |
%int2_1344 = torch.constant.int 2 | |
%int16_1345 = torch.constant.int 16 | |
%int32_1346 = torch.constant.int 32 | |
%int100_1347 = torch.constant.int 100 | |
%1368 = torch.prim.ListConstruct %359, %int26_1343, %int2_1344, %int16_1345, %int32_1346, %int100_1347 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1369 = torch.aten.view %1367, %1368 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1369, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_1348 = torch.constant.int 2662400 | |
%1370 = torch.prim.ListConstruct %359, %int2662400_1348 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1371 = torch.aten.view %1369, %1370 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %1371, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
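// Attention scores: Q (%1311) and K (%1340) are transposed to [1, 32, seq, 100],
// K is further transposed to [1, 32, 100, seq], both are expanded and flattened
// to batched 3-D tensors, and a bmm computes Q·K^T; the result is divided by
// 10.0 (the square root of the head dim 100).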
%int1_1349 = torch.constant.int 1 | |
%int2_1350 = torch.constant.int 2 | |
%1372 = torch.aten.transpose.int %1311, %int1_1349, %int2_1350 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1372, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1351 = torch.constant.int 1 | |
%int2_1352 = torch.constant.int 2 | |
%1373 = torch.aten.transpose.int %1340, %int1_1351, %int2_1352 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1373, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1353 = torch.constant.int 1 | |
%int2_1354 = torch.constant.int 2 | |
%1374 = torch.aten.transpose.int %1282, %int1_1353, %int2_1354 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1374, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_1355 = torch.constant.int 2 | |
%int3_1356 = torch.constant.int 3 | |
%1375 = torch.aten.transpose.int %1373, %int2_1355, %int3_1356 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1375, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_1357 = torch.constant.int 1 | |
%int32_1358 = torch.constant.int 32 | |
%int100_1359 = torch.constant.int 100 | |
%1376 = torch.prim.ListConstruct %int1_1357, %int32_1358, %1297, %int100_1359 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1360 = torch.constant.bool false | |
%1377 = torch.aten.expand %1372, %1376, %false_1360 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1377, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_1361 = torch.constant.int 32 | |
%int100_1362 = torch.constant.int 100 | |
%1378 = torch.prim.ListConstruct %int32_1361, %1297, %int100_1362 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1379 = torch.aten.view %1377, %1378 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1379, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_1363 = torch.constant.int 1 | |
%int32_1364 = torch.constant.int 32 | |
%int100_1365 = torch.constant.int 100 | |
%1380 = torch.prim.ListConstruct %int1_1363, %int32_1364, %int100_1365, %1326 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1366 = torch.constant.bool false | |
%1381 = torch.aten.expand %1375, %1380, %false_1366 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1381, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_1367 = torch.constant.int 32 | |
%int100_1368 = torch.constant.int 100 | |
%1382 = torch.prim.ListConstruct %int32_1367, %int100_1368, %1326 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1383 = torch.aten.view %1381, %1382 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %1383, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%1384 = torch.aten.bmm %1379, %1383 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1384, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_1369 = torch.constant.int 1 | |
%int32_1370 = torch.constant.int 32 | |
%1385 = torch.prim.ListConstruct %int1_1369, %int32_1370, %1297, %1326 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1386 = torch.aten.view %1384, %1385 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1386, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%float1.000000e01_1371 = torch.constant.float 1.000000e+01 | |
%1387 = torch.aten.div.Scalar %1386, %float1.000000e01_1371 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1387, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
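// The attention mask %266 is added to the scaled scores, softmax is taken over
// the last dim in f32 for numerical stability, and the probabilities are cast
// back to f16 (%1391).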
%int1_1372 = torch.constant.int 1 | |
%1388 = torch.aten.add.Tensor %1387, %266, %int1_1372 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1388, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_1373 = torch.constant.int 6 | |
%1389 = torch.prims.convert_element_type %1388, %int6_1373 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1389, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_1374 = torch.constant.int -1 | |
%false_1375 = torch.constant.bool false | |
%1390 = torch.aten._softmax %1389, %int-1_1374, %false_1375 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1390, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_1376 = torch.constant.int 5 | |
%1391 = torch.prims.convert_element_type %1390, %int5_1376 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1391, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_1377 = torch.constant.int 1 | |
%int32_1378 = torch.constant.int 32 | |
%1392 = torch.prim.ListConstruct %int1_1377, %int32_1378, %1297, %1326 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1379 = torch.constant.bool false | |
%1393 = torch.aten.expand %1391, %1392, %false_1379 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1393, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_1380 = torch.constant.int 32 | |
%1394 = torch.prim.ListConstruct %int32_1380, %1297, %1326 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1395 = torch.aten.view %1393, %1394 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1395, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
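// Weighted sum over values: the probabilities (%1395) are batch-matmul'd with V
// (%1374, transposed to [1, 32, seq, 100]); the result is transposed back to
// [1, seq, 32, 100] and flattened to [1, seq, 3200] (%1407).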
%int1_1381 = torch.constant.int 1 | |
%1396 = torch.aten.size.int %1276, %int1_1381 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_1382 = torch.constant.int 1 | |
%int32_1383 = torch.constant.int 32 | |
%int100_1384 = torch.constant.int 100 | |
%1397 = torch.prim.ListConstruct %int1_1382, %int32_1383, %1396, %int100_1384 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1385 = torch.constant.bool false | |
%1398 = torch.aten.expand %1374, %1397, %false_1385 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1398, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_1386 = torch.constant.int 32 | |
%int100_1387 = torch.constant.int 100 | |
%1399 = torch.prim.ListConstruct %int32_1386, %1396, %int100_1387 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1400 = torch.aten.view %1398, %1399 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1400, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%1401 = torch.aten.bmm %1395, %1400 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1401, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_1388 = torch.constant.int 1 | |
%int32_1389 = torch.constant.int 32 | |
%int100_1390 = torch.constant.int 100 | |
%1402 = torch.prim.ListConstruct %int1_1388, %int32_1389, %1297, %int100_1390 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1403 = torch.aten.view %1401, %1402 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1403, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1391 = torch.constant.int 1 | |
%int2_1392 = torch.constant.int 2 | |
%1404 = torch.aten.transpose.int %1403, %int1_1391, %int2_1392 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1404, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_1393 = torch.constant.int 0 | |
%1405 = torch.aten.clone %1404, %int0_1393 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1405, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1394 = torch.constant.int 1 | |
%int3200_1395 = torch.constant.int 3200 | |
%1406 = torch.prim.ListConstruct %int1_1394, %1297, %int3200_1395 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1407 = torch.aten._unsafe_view %1405, %1406 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1407, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
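// Attention output projection through %50 ([3200,3200]), followed by the
// residual add with the block input %1249 (%1414).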
%int-2_1396 = torch.constant.int -2 | |
%int-1_1397 = torch.constant.int -1 | |
%1408 = torch.aten.transpose.int %50, %int-2_1396, %int-1_1397 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1398 = torch.constant.int 3200 | |
%1409 = torch.prim.ListConstruct %1297, %int3200_1398 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1410 = torch.aten.view %1407, %1409 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1410, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1411 = torch.aten.mm %1410, %1408 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1411, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1399 = torch.constant.int 1 | |
%int3200_1400 = torch.constant.int 3200 | |
%1412 = torch.prim.ListConstruct %int1_1399, %1297, %int3200_1400 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1413 = torch.aten.view %1411, %1412 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1413, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1401 = torch.constant.int 1 | |
%1414 = torch.aten.add.Tensor %1249, %1413, %int1_1401 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1414, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
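// FFN RMSNorm: the same f32 square/mean/rsqrt pattern as above, scaled by the
// ffn_norm weight %51 and cast back to f16 (%1423).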
%int6_1402 = torch.constant.int 6 | |
%1415 = torch.prims.convert_element_type %1414, %int6_1402 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1415, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_1403 = torch.constant.int 2 | |
%1416 = torch.aten.pow.Tensor_Scalar %1415, %int2_1403 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1416, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_1404 = torch.constant.int -1 | |
%1417 = torch.prim.ListConstruct %int-1_1404 : (!torch.int) -> !torch.list<int> | |
%true_1405 = torch.constant.bool true | |
%none_1406 = torch.constant.none | |
%1418 = torch.aten.mean.dim %1416, %1417, %true_1405, %none_1406 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1418, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_1407 = torch.constant.float 9.9999999747524271E-7 | |
%int1_1408 = torch.constant.int 1 | |
%1419 = torch.aten.add.Scalar %1418, %float9.999990e-07_1407, %int1_1408 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1419, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1420 = torch.aten.rsqrt %1419 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1420, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1421 = torch.aten.mul.Tensor %1415, %1420 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1421, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1422 = torch.aten.mul.Tensor %51, %1421 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1422, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_1409 = torch.constant.int 5 | |
%1423 = torch.prims.convert_element_type %1422, %int5_1409 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1423, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
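// SwiGLU feed-forward: gate = silu(x @ %52^T) (%1430) and up = x @ %53^T
// (%1436), both [1, seq, 8640], multiplied elementwise (%1437).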
%int-2_1410 = torch.constant.int -2 | |
%int-1_1411 = torch.constant.int -1 | |
%1424 = torch.aten.transpose.int %52, %int-2_1410, %int-1_1411 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_1412 = torch.constant.int 3200 | |
%1425 = torch.prim.ListConstruct %240, %int3200_1412 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1426 = torch.aten.view %1423, %1425 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1426, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1427 = torch.aten.mm %1426, %1424 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1427, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_1413 = torch.constant.int 1 | |
%int8640_1414 = torch.constant.int 8640 | |
%1428 = torch.prim.ListConstruct %int1_1413, %240, %int8640_1414 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1429 = torch.aten.view %1427, %1428 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1429, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1430 = torch.aten.silu %1429 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1430, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_1415 = torch.constant.int -2 | |
%int-1_1416 = torch.constant.int -1 | |
%1431 = torch.aten.transpose.int %53, %int-2_1415, %int-1_1416 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_1417 = torch.constant.int 3200 | |
%1432 = torch.prim.ListConstruct %240, %int3200_1417 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1433 = torch.aten.view %1423, %1432 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1433, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1434 = torch.aten.mm %1433, %1431 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1434, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_1418 = torch.constant.int 1 | |
%int8640_1419 = torch.constant.int 8640 | |
%1435 = torch.prim.ListConstruct %int1_1418, %240, %int8640_1419 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1436 = torch.aten.view %1434, %1435 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1436, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1437 = torch.aten.mul.Tensor %1430, %1436 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1437, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
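// Down projection through %54 ([3200,8640]) back to width 3200, then the second
// residual add (%1445) closes this transformer block.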
%int-2_1420 = torch.constant.int -2 | |
%int-1_1421 = torch.constant.int -1 | |
%1438 = torch.aten.transpose.int %54, %int-2_1420, %int-1_1421 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_1422 = torch.constant.int 1 | |
%1439 = torch.aten.size.int %1429, %int1_1422 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_1423 = torch.constant.int 8640 | |
%1440 = torch.prim.ListConstruct %1439, %int8640_1423 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1441 = torch.aten.view %1437, %1440 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1441, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%1442 = torch.aten.mm %1441, %1438 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1442, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1424 = torch.constant.int 1 | |
%int3200_1425 = torch.constant.int 3200 | |
%1443 = torch.prim.ListConstruct %int1_1424, %1439, %int3200_1425 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1444 = torch.aten.view %1442, %1443 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1444, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1426 = torch.constant.int 1 | |
%1445 = torch.aten.add.Tensor %1414, %1444, %int1_1426 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1445, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
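// Next block begins: attention RMSNorm with weight %55, producing %1454 (f16).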
%int6_1427 = torch.constant.int 6 | |
%1446 = torch.prims.convert_element_type %1445, %int6_1427 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1446, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_1428 = torch.constant.int 2 | |
%1447 = torch.aten.pow.Tensor_Scalar %1446, %int2_1428 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1447, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_1429 = torch.constant.int -1 | |
%1448 = torch.prim.ListConstruct %int-1_1429 : (!torch.int) -> !torch.list<int> | |
%true_1430 = torch.constant.bool true | |
%none_1431 = torch.constant.none | |
%1449 = torch.aten.mean.dim %1447, %1448, %true_1430, %none_1431 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1449, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_1432 = torch.constant.float 9.9999999747524271E-7 | |
%int1_1433 = torch.constant.int 1 | |
%1450 = torch.aten.add.Scalar %1449, %float9.999990e-07_1432, %int1_1433 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1450, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1451 = torch.aten.rsqrt %1450 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1451, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1452 = torch.aten.mul.Tensor %1446, %1451 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1452, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1453 = torch.aten.mul.Tensor %55, %1452 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1453, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_1434 = torch.constant.int 5 | |
%1454 = torch.prims.convert_element_type %1453, %int5_1434 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1454, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
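// Q/K/V projections for this block via %56, %57, %58 (%1460, %1466, %1472),
// followed by the [1, seq, 32, 100] head split (%1474, %1476, %1478).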
%int-2_1435 = torch.constant.int -2 | |
%int-1_1436 = torch.constant.int -1 | |
%1455 = torch.aten.transpose.int %56, %int-2_1435, %int-1_1436 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1437 = torch.constant.int 3200 | |
%1456 = torch.prim.ListConstruct %240, %int3200_1437 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1457 = torch.aten.view %1454, %1456 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1457, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1458 = torch.aten.mm %1457, %1455 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1458, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1438 = torch.constant.int 1 | |
%int3200_1439 = torch.constant.int 3200 | |
%1459 = torch.prim.ListConstruct %int1_1438, %240, %int3200_1439 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1460 = torch.aten.view %1458, %1459 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1460, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1440 = torch.constant.int -2 | |
%int-1_1441 = torch.constant.int -1 | |
%1461 = torch.aten.transpose.int %57, %int-2_1440, %int-1_1441 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1442 = torch.constant.int 3200 | |
%1462 = torch.prim.ListConstruct %240, %int3200_1442 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1463 = torch.aten.view %1454, %1462 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1463, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1464 = torch.aten.mm %1463, %1461 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1464, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1443 = torch.constant.int 1 | |
%int3200_1444 = torch.constant.int 3200 | |
%1465 = torch.prim.ListConstruct %int1_1443, %240, %int3200_1444 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1466 = torch.aten.view %1464, %1465 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1466, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1445 = torch.constant.int -2 | |
%int-1_1446 = torch.constant.int -1 | |
%1467 = torch.aten.transpose.int %58, %int-2_1445, %int-1_1446 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1447 = torch.constant.int 3200 | |
%1468 = torch.prim.ListConstruct %240, %int3200_1447 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1469 = torch.aten.view %1454, %1468 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1469, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1470 = torch.aten.mm %1469, %1467 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1470, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1448 = torch.constant.int 1 | |
%int3200_1449 = torch.constant.int 3200 | |
%1471 = torch.prim.ListConstruct %int1_1448, %240, %int3200_1449 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1472 = torch.aten.view %1470, %1471 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1472, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1450 = torch.constant.int 1 | |
%int32_1451 = torch.constant.int 32 | |
%int100_1452 = torch.constant.int 100 | |
%1473 = torch.prim.ListConstruct %int1_1450, %240, %int32_1451, %int100_1452 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1474 = torch.aten.view %1460, %1473 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1474, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1453 = torch.constant.int 1 | |
%int32_1454 = torch.constant.int 32 | |
%int100_1455 = torch.constant.int 100 | |
%1475 = torch.prim.ListConstruct %int1_1453, %240, %int32_1454, %int100_1455 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1476 = torch.aten.view %1466, %1475 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1476, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1456 = torch.constant.int 1 | |
%int32_1457 = torch.constant.int 32 | |
%int100_1458 = torch.constant.int 100 | |
%1477 = torch.prim.ListConstruct %int1_1456, %240, %int32_1457, %int100_1458 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1478 = torch.aten.view %1472, %1477 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1478, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
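// RoPE for this block's Q: the [2048,50] complex table is rebuilt and applied
// exactly as before; the rotated Q is %1507.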
%int2048_1459 = torch.constant.int 2048 | |
%none_1460 = torch.constant.none | |
%none_1461 = torch.constant.none | |
%cpu_1462 = torch.constant.device "cpu" | |
%false_1463 = torch.constant.bool false | |
%1479 = torch.aten.arange %int2048_1459, %none_1460, %none_1461, %cpu_1462, %false_1463 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1464 = torch.constant.int 0 | |
%int100_1465 = torch.constant.int 100 | |
%int2_1466 = torch.constant.int 2 | |
%none_1467 = torch.constant.none | |
%none_1468 = torch.constant.none | |
%cpu_1469 = torch.constant.device "cpu" | |
%false_1470 = torch.constant.bool false | |
%1480 = torch.aten.arange.start_step %int0_1464, %int100_1465, %int2_1466, %none_1467, %none_1468, %cpu_1469, %false_1470 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1471 = torch.constant.int 0 | |
%int0_1472 = torch.constant.int 0 | |
%int50_1473 = torch.constant.int 50 | |
%int1_1474 = torch.constant.int 1 | |
%1481 = torch.aten.slice.Tensor %1480, %int0_1471, %int0_1472, %int50_1473, %int1_1474 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1475 = torch.constant.int 6 | |
%1482 = torch.prims.convert_element_type %1481, %int6_1475 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1476 = torch.constant.int 100 | |
%1483 = torch.aten.div.Scalar %1482, %int100_1476 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1477 = torch.constant.float 1.000000e+04 | |
%1484 = torch.aten.pow.Scalar %float1.000000e04_1477, %1483 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1485 = torch.aten.reciprocal %1484 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1478 = torch.constant.float 1.000000e+00 | |
%1486 = torch.aten.mul.Scalar %1485, %float1.000000e00_1478 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1479 = torch.constant.int 2048 | |
%int1_1480 = torch.constant.int 1 | |
%1487 = torch.prim.ListConstruct %int2048_1479, %int1_1480 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1488 = torch.aten.view %1479, %1487 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1489 = torch.aten.mul.Tensor %1488, %1486 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1490 = torch.aten.cos %1489 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1491 = torch.aten.sin %1489 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1492 = torch.aten.complex %1490, %1491 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1481 = torch.constant.int 1 | |
%1493 = torch.aten.size.int %1460, %int1_1481 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1482 = torch.constant.int 0 | |
%1494 = torch.aten.add.int %int0_1482, %1493 : !torch.int, !torch.int -> !torch.int | |
%int0_1483 = torch.constant.int 0 | |
%int0_1484 = torch.constant.int 0 | |
%int1_1485 = torch.constant.int 1 | |
%1495 = torch.aten.slice.Tensor %1492, %int0_1483, %int0_1484, %1494, %int1_1485 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1495, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1486 = torch.constant.int 1 | |
%int0_1487 = torch.constant.int 0 | |
%int9223372036854775807_1488 = torch.constant.int 9223372036854775807 | |
%int1_1489 = torch.constant.int 1 | |
%1496 = torch.aten.slice.Tensor %1495, %int1_1486, %int0_1487, %int9223372036854775807_1488, %int1_1489 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1496, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1490 = torch.constant.int 0 | |
%1497 = torch.aten.unsqueeze %1496, %int0_1490 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1497, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1491 = torch.constant.int 2 | |
%1498 = torch.aten.unsqueeze %1497, %int2_1491 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1498, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1492 = torch.constant.int 3 | |
%int0_1493 = torch.constant.int 0 | |
%int9223372036854775807_1494 = torch.constant.int 9223372036854775807 | |
%int1_1495 = torch.constant.int 1 | |
%1499 = torch.aten.slice.Tensor %1498, %int3_1492, %int0_1493, %int9223372036854775807_1494, %int1_1495 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1499, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
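// editor note: apply RoPE to the query states (%1474): bitcast f16 [1,seq,32,100] to
// complex<f16> [1,seq,32,50], complex-multiply by the factors above, bitcast back to
// interleaved real pairs, and truncate to f16.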
%1500 = torch_c.to_builtin_tensor %1474 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_1496 = arith.constant 1 : index | |
%dim_1497 = tensor.dim %1500, %c1_1496 : tensor<1x?x32x100xf16> | |
%1501 = flow.tensor.bitcast %1500 : tensor<1x?x32x100xf16>{%dim_1497} -> tensor<1x?x32x50xcomplex<f16>>{%dim_1497} | |
%1502 = torch_c.from_builtin_tensor %1501 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1502, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1503 = torch.aten.mul.Tensor %1502, %1499 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1503, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1504 = torch_c.to_builtin_tensor %1503 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_1498 = arith.constant 1 : index | |
%dim_1499 = tensor.dim %1504, %c1_1498 : tensor<1x?x32x50xcomplex<f32>> | |
%1505 = flow.tensor.bitcast %1504 : tensor<1x?x32x50xcomplex<f32>>{%dim_1499} -> tensor<1x?x32x100xf32>{%dim_1499} | |
%1506 = torch_c.from_builtin_tensor %1505 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1506, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_1500 = torch.constant.int 5 | |
%1507 = torch.prims.convert_element_type %1506, %int5_1500 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1507, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
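// editor note: the identical RoPE table is re-emitted below for the key states; the exporter
// appears to duplicate it per use, presumably leaving de-duplication to a later CSE pass.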
%int2048_1501 = torch.constant.int 2048 | |
%none_1502 = torch.constant.none | |
%none_1503 = torch.constant.none | |
%cpu_1504 = torch.constant.device "cpu" | |
%false_1505 = torch.constant.bool false | |
%1508 = torch.aten.arange %int2048_1501, %none_1502, %none_1503, %cpu_1504, %false_1505 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1506 = torch.constant.int 0 | |
%int100_1507 = torch.constant.int 100 | |
%int2_1508 = torch.constant.int 2 | |
%none_1509 = torch.constant.none | |
%none_1510 = torch.constant.none | |
%cpu_1511 = torch.constant.device "cpu" | |
%false_1512 = torch.constant.bool false | |
%1509 = torch.aten.arange.start_step %int0_1506, %int100_1507, %int2_1508, %none_1509, %none_1510, %cpu_1511, %false_1512 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1513 = torch.constant.int 0 | |
%int0_1514 = torch.constant.int 0 | |
%int50_1515 = torch.constant.int 50 | |
%int1_1516 = torch.constant.int 1 | |
%1510 = torch.aten.slice.Tensor %1509, %int0_1513, %int0_1514, %int50_1515, %int1_1516 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1517 = torch.constant.int 6 | |
%1511 = torch.prims.convert_element_type %1510, %int6_1517 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1518 = torch.constant.int 100 | |
%1512 = torch.aten.div.Scalar %1511, %int100_1518 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1519 = torch.constant.float 1.000000e+04 | |
%1513 = torch.aten.pow.Scalar %float1.000000e04_1519, %1512 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1514 = torch.aten.reciprocal %1513 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1520 = torch.constant.float 1.000000e+00 | |
%1515 = torch.aten.mul.Scalar %1514, %float1.000000e00_1520 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1521 = torch.constant.int 2048 | |
%int1_1522 = torch.constant.int 1 | |
%1516 = torch.prim.ListConstruct %int2048_1521, %int1_1522 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1517 = torch.aten.view %1508, %1516 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1518 = torch.aten.mul.Tensor %1517, %1515 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1519 = torch.aten.cos %1518 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1520 = torch.aten.sin %1518 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1521 = torch.aten.complex %1519, %1520 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1523 = torch.constant.int 1 | |
%1522 = torch.aten.size.int %1466, %int1_1523 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1524 = torch.constant.int 0 | |
%1523 = torch.aten.add.int %int0_1524, %1522 : !torch.int, !torch.int -> !torch.int | |
%int0_1525 = torch.constant.int 0 | |
%int0_1526 = torch.constant.int 0 | |
%int1_1527 = torch.constant.int 1 | |
%1524 = torch.aten.slice.Tensor %1521, %int0_1525, %int0_1526, %1523, %int1_1527 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1524, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1528 = torch.constant.int 1 | |
%int0_1529 = torch.constant.int 0 | |
%int9223372036854775807_1530 = torch.constant.int 9223372036854775807 | |
%int1_1531 = torch.constant.int 1 | |
%1525 = torch.aten.slice.Tensor %1524, %int1_1528, %int0_1529, %int9223372036854775807_1530, %int1_1531 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1525, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1532 = torch.constant.int 0 | |
%1526 = torch.aten.unsqueeze %1525, %int0_1532 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1526, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1533 = torch.constant.int 2 | |
%1527 = torch.aten.unsqueeze %1526, %int2_1533 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1527, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1534 = torch.constant.int 3 | |
%int0_1535 = torch.constant.int 0 | |
%int9223372036854775807_1536 = torch.constant.int 9223372036854775807 | |
%int1_1537 = torch.constant.int 1 | |
%1528 = torch.aten.slice.Tensor %1527, %int3_1534, %int0_1535, %int9223372036854775807_1536, %int1_1537 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1528, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
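// editor note: the same complex rotation is applied to the key states (%1476).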
%1529 = torch_c.to_builtin_tensor %1476 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_1538 = arith.constant 1 : index | |
%dim_1539 = tensor.dim %1529, %c1_1538 : tensor<1x?x32x100xf16> | |
%1530 = flow.tensor.bitcast %1529 : tensor<1x?x32x100xf16>{%dim_1539} -> tensor<1x?x32x50xcomplex<f16>>{%dim_1539} | |
%1531 = torch_c.from_builtin_tensor %1530 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1531, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1532 = torch.aten.mul.Tensor %1531, %1528 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1532, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1533 = torch_c.to_builtin_tensor %1532 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_1540 = arith.constant 1 : index | |
%dim_1541 = tensor.dim %1533, %c1_1540 : tensor<1x?x32x50xcomplex<f32>> | |
%1534 = flow.tensor.bitcast %1533 : tensor<1x?x32x50xcomplex<f32>>{%dim_1541} -> tensor<1x?x32x100xf32>{%dim_1541} | |
%1535 = torch_c.from_builtin_tensor %1534 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1535, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_1542 = torch.constant.int 5 | |
%1536 = torch.prims.convert_element_type %1535, %int5_1542 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1536, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
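// editor note: flat slot indices into the paged KV cache: each page holds 26 layers x {K,V}
// = 52 planes, so this block's K goes to page_id * 52 + 12 (apparently block 6, slot 2*6)
// and its V to the following slot (the +1 add further down).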
%int52_1543 = torch.constant.int 52 | |
%1537 = torch.aten.mul.Scalar %arg2, %int52_1543 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1537, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int12 = torch.constant.int 12 | |
%int1_1544 = torch.constant.int 1 | |
%1538 = torch.aten.add.Scalar %1537, %int12, %int1_1544 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1538, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_1545 = torch.constant.int 1 | |
%int16_1546 = torch.constant.int 16 | |
%int32_1547 = torch.constant.int 32 | |
%int100_1548 = torch.constant.int 100 | |
%1539 = torch.prim.ListConstruct %int1_1545, %368, %int16_1546, %int32_1547, %int100_1548 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1540 = torch.aten.view %1536, %1539 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1540, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_1549 = torch.constant.int 16 | |
%int32_1550 = torch.constant.int 32 | |
%int100_1551 = torch.constant.int 100 | |
%1541 = torch.prim.ListConstruct %368, %int16_1549, %int32_1550, %int100_1551 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1542 = torch.aten.view %1540, %1541 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1542, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1543 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1544 = torch.aten.view %1538, %1543 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1544, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_1552 = torch.constant.int 1 | |
%int16_1553 = torch.constant.int 16 | |
%int32_1554 = torch.constant.int 32 | |
%int100_1555 = torch.constant.int 100 | |
%1545 = torch.prim.ListConstruct %int1_1552, %368, %int16_1553, %int32_1554, %int100_1555 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1546 = torch.aten.view %1478, %1545 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1546, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_1556 = torch.constant.int 16 | |
%int32_1557 = torch.constant.int 32 | |
%int100_1558 = torch.constant.int 100 | |
%1547 = torch.prim.ListConstruct %368, %int16_1556, %int32_1557, %int100_1558 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1548 = torch.aten.view %1546, %1547 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1548, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_1559 = torch.constant.int 1 | |
%int1_1560 = torch.constant.int 1 | |
%1549 = torch.aten.add.Scalar %1538, %int1_1559, %int1_1560 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1549, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%1550 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1551 = torch.aten.view %1549, %1550 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1551, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%1552 = torch.prim.ListConstruct %1544, %1551 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_1561 = torch.constant.int 0 | |
%1553 = torch.aten.cat %1552, %int0_1561 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1553, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%1554 = torch.prim.ListConstruct %1542, %1548 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_1562 = torch.constant.int 0 | |
%1555 = torch.aten.cat %1554, %int0_1562 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1555, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
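// editor note: scatter the concatenated K/V pages into the flat cache %1371: view [?,2662400]
// as [?,26,2,16,32,100] (26 * 2 * 16 * 32 * 100 = 2,662,400 f16 elements per page), flatten,
// index_put at the slot indices, then flatten back to the storage shape.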
%int26_1563 = torch.constant.int 26 | |
%int2_1564 = torch.constant.int 2 | |
%int16_1565 = torch.constant.int 16 | |
%int32_1566 = torch.constant.int 32 | |
%int100_1567 = torch.constant.int 100 | |
%1556 = torch.prim.ListConstruct %359, %int26_1563, %int2_1564, %int16_1565, %int32_1566, %int100_1567 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1557 = torch.aten.view %1371, %1556 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1557, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_1568 = torch.constant.int 26 | |
%1558 = torch.aten.mul.int %359, %int26_1568 : !torch.int, !torch.int -> !torch.int | |
%int2_1569 = torch.constant.int 2 | |
%1559 = torch.aten.mul.int %1558, %int2_1569 : !torch.int, !torch.int -> !torch.int | |
%int16_1570 = torch.constant.int 16 | |
%int32_1571 = torch.constant.int 32 | |
%int100_1572 = torch.constant.int 100 | |
%1560 = torch.prim.ListConstruct %1559, %int16_1570, %int32_1571, %int100_1572 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1561 = torch.aten.view %1557, %1560 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1561, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1562 = torch.prim.ListConstruct %1553 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_1573 = torch.constant.bool false | |
%1563 = torch.aten.index_put %1561, %1562, %1555, %false_1573 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1563, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_1574 = torch.constant.int 26 | |
%int2_1575 = torch.constant.int 2 | |
%int16_1576 = torch.constant.int 16 | |
%int32_1577 = torch.constant.int 32 | |
%int100_1578 = torch.constant.int 100 | |
%1564 = torch.prim.ListConstruct %359, %int26_1574, %int2_1575, %int16_1576, %int32_1577, %int100_1578 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1565 = torch.aten.view %1563, %1564 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1565, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_1579 = torch.constant.int 2662400 | |
%1566 = torch.prim.ListConstruct %359, %int2662400_1579 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1567 = torch.aten.view %1565, %1566 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %1567, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
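// editor note: scaled dot-product attention: transpose rotated Q, rotated K, and V from
// [1,seq,32,100] to [1,32,seq,100], with K further transposed to [1,32,100,seq] for the matmul.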
%int1_1580 = torch.constant.int 1 | |
%int2_1581 = torch.constant.int 2 | |
%1568 = torch.aten.transpose.int %1507, %int1_1580, %int2_1581 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1568, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1582 = torch.constant.int 1 | |
%int2_1583 = torch.constant.int 2 | |
%1569 = torch.aten.transpose.int %1536, %int1_1582, %int2_1583 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1569, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1584 = torch.constant.int 1 | |
%int2_1585 = torch.constant.int 2 | |
%1570 = torch.aten.transpose.int %1478, %int1_1584, %int2_1585 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1570, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_1586 = torch.constant.int 2 | |
%int3_1587 = torch.constant.int 3 | |
%1571 = torch.aten.transpose.int %1569, %int2_1586, %int3_1587 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1571, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_1588 = torch.constant.int 1 | |
%int32_1589 = torch.constant.int 32 | |
%int100_1590 = torch.constant.int 100 | |
%1572 = torch.prim.ListConstruct %int1_1588, %int32_1589, %1493, %int100_1590 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1591 = torch.constant.bool false | |
%1573 = torch.aten.expand %1568, %1572, %false_1591 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1573, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_1592 = torch.constant.int 32 | |
%int100_1593 = torch.constant.int 100 | |
%1574 = torch.prim.ListConstruct %int32_1592, %1493, %int100_1593 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1575 = torch.aten.view %1573, %1574 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1575, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_1594 = torch.constant.int 1 | |
%int32_1595 = torch.constant.int 32 | |
%int100_1596 = torch.constant.int 100 | |
%1576 = torch.prim.ListConstruct %int1_1594, %int32_1595, %int100_1596, %1522 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1597 = torch.constant.bool false | |
%1577 = torch.aten.expand %1571, %1576, %false_1597 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1577, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_1598 = torch.constant.int 32 | |
%int100_1599 = torch.constant.int 100 | |
%1578 = torch.prim.ListConstruct %int32_1598, %int100_1599, %1522 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1579 = torch.aten.view %1577, %1578 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %1579, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
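// editor note: attention scores = Q @ K^T as a flattened batched matmul, scaled by
// 1/sqrt(head_dim) = 1/10 (the div by 1.0e+01 below), plus the additive attention mask %266.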
%1580 = torch.aten.bmm %1575, %1579 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1580, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_1600 = torch.constant.int 1 | |
%int32_1601 = torch.constant.int 32 | |
%1581 = torch.prim.ListConstruct %int1_1600, %int32_1601, %1493, %1522 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1582 = torch.aten.view %1580, %1581 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1582, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%float1.000000e01_1602 = torch.constant.float 1.000000e+01 | |
%1583 = torch.aten.div.Scalar %1582, %float1.000000e01_1602 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1583, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_1603 = torch.constant.int 1 | |
%1584 = torch.aten.add.Tensor %1583, %266, %int1_1603 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1584, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
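// editor note: softmax is computed in f32 for numerical stability, then cast back to f16.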
%int6_1604 = torch.constant.int 6 | |
%1585 = torch.prims.convert_element_type %1584, %int6_1604 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1585, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_1605 = torch.constant.int -1 | |
%false_1606 = torch.constant.bool false | |
%1586 = torch.aten._softmax %1585, %int-1_1605, %false_1606 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1586, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_1607 = torch.constant.int 5 | |
%1587 = torch.prims.convert_element_type %1586, %int5_1607 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1587, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
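// editor note: attention output = probabilities @ V, again via expand/view into a batched matmul.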
%int1_1608 = torch.constant.int 1 | |
%int32_1609 = torch.constant.int 32 | |
%1588 = torch.prim.ListConstruct %int1_1608, %int32_1609, %1493, %1522 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1610 = torch.constant.bool false | |
%1589 = torch.aten.expand %1587, %1588, %false_1610 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1589, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_1611 = torch.constant.int 32 | |
%1590 = torch.prim.ListConstruct %int32_1611, %1493, %1522 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1591 = torch.aten.view %1589, %1590 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1591, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_1612 = torch.constant.int 1 | |
%1592 = torch.aten.size.int %1472, %int1_1612 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_1613 = torch.constant.int 1 | |
%int32_1614 = torch.constant.int 32 | |
%int100_1615 = torch.constant.int 100 | |
%1593 = torch.prim.ListConstruct %int1_1613, %int32_1614, %1592, %int100_1615 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1616 = torch.constant.bool false | |
%1594 = torch.aten.expand %1570, %1593, %false_1616 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1594, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_1617 = torch.constant.int 32 | |
%int100_1618 = torch.constant.int 100 | |
%1595 = torch.prim.ListConstruct %int32_1617, %1592, %int100_1618 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1596 = torch.aten.view %1594, %1595 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1596, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%1597 = torch.aten.bmm %1591, %1596 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1597, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
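// editor note: un-flatten the bmm result, transpose heads back, and merge [1,seq,32,100] into
// [1,seq,3200]; the clone forces a contiguous layout before the _unsafe_view.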
%int1_1619 = torch.constant.int 1 | |
%int32_1620 = torch.constant.int 32 | |
%int100_1621 = torch.constant.int 100 | |
%1598 = torch.prim.ListConstruct %int1_1619, %int32_1620, %1493, %int100_1621 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1599 = torch.aten.view %1597, %1598 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1599, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1622 = torch.constant.int 1 | |
%int2_1623 = torch.constant.int 2 | |
%1600 = torch.aten.transpose.int %1599, %int1_1622, %int2_1623 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1600, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_1624 = torch.constant.int 0 | |
%1601 = torch.aten.clone %1600, %int0_1624 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1601, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1625 = torch.constant.int 1 | |
%int3200_1626 = torch.constant.int 3200 | |
%1602 = torch.prim.ListConstruct %int1_1625, %1493, %int3200_1626 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1603 = torch.aten._unsafe_view %1601, %1602 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1603, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
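// editor note: attention output projection (weight %59) followed by the residual add with the
// block input %1445.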
%int-2_1627 = torch.constant.int -2 | |
%int-1_1628 = torch.constant.int -1 | |
%1604 = torch.aten.transpose.int %59, %int-2_1627, %int-1_1628 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1629 = torch.constant.int 3200 | |
%1605 = torch.prim.ListConstruct %1493, %int3200_1629 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1606 = torch.aten.view %1603, %1605 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1606, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1607 = torch.aten.mm %1606, %1604 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1607, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1630 = torch.constant.int 1 | |
%int3200_1631 = torch.constant.int 3200 | |
%1608 = torch.prim.ListConstruct %int1_1630, %1493, %int3200_1631 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1609 = torch.aten.view %1607, %1608 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1609, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1632 = torch.constant.int 1 | |
%1610 = torch.aten.add.Tensor %1445, %1609, %int1_1632 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1610, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
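// editor note: RMSNorm before the FFN: mean of squares over the hidden dim, add eps ~ 1e-6,
// rsqrt, then scale by the ffn_norm weight %60.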
%int6_1633 = torch.constant.int 6 | |
%1611 = torch.prims.convert_element_type %1610, %int6_1633 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1611, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_1634 = torch.constant.int 2 | |
%1612 = torch.aten.pow.Tensor_Scalar %1611, %int2_1634 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1612, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_1635 = torch.constant.int -1 | |
%1613 = torch.prim.ListConstruct %int-1_1635 : (!torch.int) -> !torch.list<int> | |
%true_1636 = torch.constant.bool true | |
%none_1637 = torch.constant.none | |
%1614 = torch.aten.mean.dim %1612, %1613, %true_1636, %none_1637 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1614, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_1638 = torch.constant.float 9.9999999747524271E-7 | |
%int1_1639 = torch.constant.int 1 | |
%1615 = torch.aten.add.Scalar %1614, %float9.999990e-07_1638, %int1_1639 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1615, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1616 = torch.aten.rsqrt %1615 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1616, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1617 = torch.aten.mul.Tensor %1611, %1616 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1617, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1618 = torch.aten.mul.Tensor %60, %1617 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1618, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_1640 = torch.constant.int 5 | |
%1619 = torch.prims.convert_element_type %1618, %int5_1640 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1619, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
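// editor note: SwiGLU feed-forward: silu(x @ ffn_gate^T) * (x @ ffn_up^T), projected back
// through ffn_down (weights %61, %62, %63; 3200 -> 8640 -> 3200), then a second residual add.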
%int-2_1641 = torch.constant.int -2 | |
%int-1_1642 = torch.constant.int -1 | |
%1620 = torch.aten.transpose.int %61, %int-2_1641, %int-1_1642 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_1643 = torch.constant.int 3200 | |
%1621 = torch.prim.ListConstruct %240, %int3200_1643 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1622 = torch.aten.view %1619, %1621 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1622, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1623 = torch.aten.mm %1622, %1620 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1623, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_1644 = torch.constant.int 1 | |
%int8640_1645 = torch.constant.int 8640 | |
%1624 = torch.prim.ListConstruct %int1_1644, %240, %int8640_1645 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1625 = torch.aten.view %1623, %1624 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1625, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1626 = torch.aten.silu %1625 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1626, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_1646 = torch.constant.int -2 | |
%int-1_1647 = torch.constant.int -1 | |
%1627 = torch.aten.transpose.int %62, %int-2_1646, %int-1_1647 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_1648 = torch.constant.int 3200 | |
%1628 = torch.prim.ListConstruct %240, %int3200_1648 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1629 = torch.aten.view %1619, %1628 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1629, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1630 = torch.aten.mm %1629, %1627 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1630, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_1649 = torch.constant.int 1 | |
%int8640_1650 = torch.constant.int 8640 | |
%1631 = torch.prim.ListConstruct %int1_1649, %240, %int8640_1650 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1632 = torch.aten.view %1630, %1631 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1632, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1633 = torch.aten.mul.Tensor %1626, %1632 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1633, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_1651 = torch.constant.int -2 | |
%int-1_1652 = torch.constant.int -1 | |
%1634 = torch.aten.transpose.int %63, %int-2_1651, %int-1_1652 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_1653 = torch.constant.int 1 | |
%1635 = torch.aten.size.int %1625, %int1_1653 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_1654 = torch.constant.int 8640 | |
%1636 = torch.prim.ListConstruct %1635, %int8640_1654 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1637 = torch.aten.view %1633, %1636 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1637, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%1638 = torch.aten.mm %1637, %1634 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1638, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1655 = torch.constant.int 1 | |
%int3200_1656 = torch.constant.int 3200 | |
%1639 = torch.prim.ListConstruct %int1_1655, %1635, %int3200_1656 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1640 = torch.aten.view %1638, %1639 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1640, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1657 = torch.constant.int 1 | |
%1641 = torch.aten.add.Tensor %1610, %1640, %int1_1657 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1641, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
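// editor note: the next transformer block begins here with the same RMSNorm pattern, using the
// attn_norm weight %64.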
%int6_1658 = torch.constant.int 6 | |
%1642 = torch.prims.convert_element_type %1641, %int6_1658 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1642, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_1659 = torch.constant.int 2 | |
%1643 = torch.aten.pow.Tensor_Scalar %1642, %int2_1659 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1643, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_1660 = torch.constant.int -1 | |
%1644 = torch.prim.ListConstruct %int-1_1660 : (!torch.int) -> !torch.list<int> | |
%true_1661 = torch.constant.bool true | |
%none_1662 = torch.constant.none | |
%1645 = torch.aten.mean.dim %1643, %1644, %true_1661, %none_1662 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1645, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_1663 = torch.constant.float 9.9999999747524271E-7 | |
%int1_1664 = torch.constant.int 1 | |
%1646 = torch.aten.add.Scalar %1645, %float9.999990e-07_1663, %int1_1664 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1646, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1647 = torch.aten.rsqrt %1646 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1647, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1648 = torch.aten.mul.Tensor %1642, %1647 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1648, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1649 = torch.aten.mul.Tensor %64, %1648 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1649, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_1665 = torch.constant.int 5 | |
%1650 = torch.prims.convert_element_type %1649, %int5_1665 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1650, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
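// editor note: Q, K, V projections for this block (weights %65, %66, %67), each subsequently
// viewed as [1,seq,32,100].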
%int-2_1666 = torch.constant.int -2 | |
%int-1_1667 = torch.constant.int -1 | |
%1651 = torch.aten.transpose.int %65, %int-2_1666, %int-1_1667 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1668 = torch.constant.int 3200 | |
%1652 = torch.prim.ListConstruct %240, %int3200_1668 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1653 = torch.aten.view %1650, %1652 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1653, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1654 = torch.aten.mm %1653, %1651 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1654, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1669 = torch.constant.int 1 | |
%int3200_1670 = torch.constant.int 3200 | |
%1655 = torch.prim.ListConstruct %int1_1669, %240, %int3200_1670 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1656 = torch.aten.view %1654, %1655 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1656, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1671 = torch.constant.int -2 | |
%int-1_1672 = torch.constant.int -1 | |
%1657 = torch.aten.transpose.int %66, %int-2_1671, %int-1_1672 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1673 = torch.constant.int 3200 | |
%1658 = torch.prim.ListConstruct %240, %int3200_1673 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1659 = torch.aten.view %1650, %1658 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1659, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1660 = torch.aten.mm %1659, %1657 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1660, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1674 = torch.constant.int 1 | |
%int3200_1675 = torch.constant.int 3200 | |
%1661 = torch.prim.ListConstruct %int1_1674, %240, %int3200_1675 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1662 = torch.aten.view %1660, %1661 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1662, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1676 = torch.constant.int -2 | |
%int-1_1677 = torch.constant.int -1 | |
%1663 = torch.aten.transpose.int %67, %int-2_1676, %int-1_1677 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1678 = torch.constant.int 3200 | |
%1664 = torch.prim.ListConstruct %240, %int3200_1678 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1665 = torch.aten.view %1650, %1664 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1665, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1666 = torch.aten.mm %1665, %1663 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1666, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1679 = torch.constant.int 1 | |
%int3200_1680 = torch.constant.int 3200 | |
%1667 = torch.prim.ListConstruct %int1_1679, %240, %int3200_1680 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1668 = torch.aten.view %1666, %1667 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1668, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1681 = torch.constant.int 1 | |
%int32_1682 = torch.constant.int 32 | |
%int100_1683 = torch.constant.int 100 | |
%1669 = torch.prim.ListConstruct %int1_1681, %240, %int32_1682, %int100_1683 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1670 = torch.aten.view %1656, %1669 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1670, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1684 = torch.constant.int 1 | |
%int32_1685 = torch.constant.int 32 | |
%int100_1686 = torch.constant.int 100 | |
%1671 = torch.prim.ListConstruct %int1_1684, %240, %int32_1685, %int100_1686 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1672 = torch.aten.view %1662, %1671 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1672, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1687 = torch.constant.int 1 | |
%int32_1688 = torch.constant.int 32 | |
%int100_1689 = torch.constant.int 100 | |
%1673 = torch.prim.ListConstruct %int1_1687, %240, %int32_1688, %int100_1689 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1674 = torch.aten.view %1668, %1673 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1674, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
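// editor note: from here the block repeats the pattern above: RoPE table construction, rotation
// of Q (%1670) and K (%1672), then the paged-cache update with this block's slot offset.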
%int2048_1690 = torch.constant.int 2048 | |
%none_1691 = torch.constant.none | |
%none_1692 = torch.constant.none | |
%cpu_1693 = torch.constant.device "cpu" | |
%false_1694 = torch.constant.bool false | |
%1675 = torch.aten.arange %int2048_1690, %none_1691, %none_1692, %cpu_1693, %false_1694 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1695 = torch.constant.int 0 | |
%int100_1696 = torch.constant.int 100 | |
%int2_1697 = torch.constant.int 2 | |
%none_1698 = torch.constant.none | |
%none_1699 = torch.constant.none | |
%cpu_1700 = torch.constant.device "cpu" | |
%false_1701 = torch.constant.bool false | |
%1676 = torch.aten.arange.start_step %int0_1695, %int100_1696, %int2_1697, %none_1698, %none_1699, %cpu_1700, %false_1701 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1702 = torch.constant.int 0 | |
%int0_1703 = torch.constant.int 0 | |
%int50_1704 = torch.constant.int 50 | |
%int1_1705 = torch.constant.int 1 | |
%1677 = torch.aten.slice.Tensor %1676, %int0_1702, %int0_1703, %int50_1704, %int1_1705 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1706 = torch.constant.int 6 | |
%1678 = torch.prims.convert_element_type %1677, %int6_1706 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1707 = torch.constant.int 100 | |
%1679 = torch.aten.div.Scalar %1678, %int100_1707 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1708 = torch.constant.float 1.000000e+04 | |
%1680 = torch.aten.pow.Scalar %float1.000000e04_1708, %1679 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1681 = torch.aten.reciprocal %1680 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1709 = torch.constant.float 1.000000e+00 | |
%1682 = torch.aten.mul.Scalar %1681, %float1.000000e00_1709 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1710 = torch.constant.int 2048 | |
%int1_1711 = torch.constant.int 1 | |
%1683 = torch.prim.ListConstruct %int2048_1710, %int1_1711 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1684 = torch.aten.view %1675, %1683 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1685 = torch.aten.mul.Tensor %1684, %1682 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1686 = torch.aten.cos %1685 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1687 = torch.aten.sin %1685 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1688 = torch.aten.complex %1686, %1687 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1712 = torch.constant.int 1 | |
%1689 = torch.aten.size.int %1656, %int1_1712 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1713 = torch.constant.int 0 | |
%1690 = torch.aten.add.int %int0_1713, %1689 : !torch.int, !torch.int -> !torch.int | |
%int0_1714 = torch.constant.int 0 | |
%int0_1715 = torch.constant.int 0 | |
%int1_1716 = torch.constant.int 1 | |
%1691 = torch.aten.slice.Tensor %1688, %int0_1714, %int0_1715, %1690, %int1_1716 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1691, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1717 = torch.constant.int 1 | |
%int0_1718 = torch.constant.int 0 | |
%int9223372036854775807_1719 = torch.constant.int 9223372036854775807 | |
%int1_1720 = torch.constant.int 1 | |
%1692 = torch.aten.slice.Tensor %1691, %int1_1717, %int0_1718, %int9223372036854775807_1719, %int1_1720 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1692, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1721 = torch.constant.int 0 | |
%1693 = torch.aten.unsqueeze %1692, %int0_1721 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1693, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1722 = torch.constant.int 2 | |
%1694 = torch.aten.unsqueeze %1693, %int2_1722 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1694, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1723 = torch.constant.int 3 | |
%int0_1724 = torch.constant.int 0 | |
%int9223372036854775807_1725 = torch.constant.int 9223372036854775807 | |
%int1_1726 = torch.constant.int 1 | |
%1695 = torch.aten.slice.Tensor %1694, %int3_1723, %int0_1724, %int9223372036854775807_1725, %int1_1726 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1695, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%1696 = torch_c.to_builtin_tensor %1670 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_1727 = arith.constant 1 : index | |
%dim_1728 = tensor.dim %1696, %c1_1727 : tensor<1x?x32x100xf16> | |
%1697 = flow.tensor.bitcast %1696 : tensor<1x?x32x100xf16>{%dim_1728} -> tensor<1x?x32x50xcomplex<f16>>{%dim_1728} | |
%1698 = torch_c.from_builtin_tensor %1697 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1698, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1699 = torch.aten.mul.Tensor %1698, %1695 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1699, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1700 = torch_c.to_builtin_tensor %1699 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_1729 = arith.constant 1 : index | |
%dim_1730 = tensor.dim %1700, %c1_1729 : tensor<1x?x32x50xcomplex<f32>> | |
%1701 = flow.tensor.bitcast %1700 : tensor<1x?x32x50xcomplex<f32>>{%dim_1730} -> tensor<1x?x32x100xf32>{%dim_1730} | |
%1702 = torch_c.from_builtin_tensor %1701 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1702, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_1731 = torch.constant.int 5 | |
%1703 = torch.prims.convert_element_type %1702, %int5_1731 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1703, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int2048_1732 = torch.constant.int 2048 | |
%none_1733 = torch.constant.none | |
%none_1734 = torch.constant.none | |
%cpu_1735 = torch.constant.device "cpu" | |
%false_1736 = torch.constant.bool false | |
%1704 = torch.aten.arange %int2048_1732, %none_1733, %none_1734, %cpu_1735, %false_1736 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1737 = torch.constant.int 0 | |
%int100_1738 = torch.constant.int 100 | |
%int2_1739 = torch.constant.int 2 | |
%none_1740 = torch.constant.none | |
%none_1741 = torch.constant.none | |
%cpu_1742 = torch.constant.device "cpu" | |
%false_1743 = torch.constant.bool false | |
%1705 = torch.aten.arange.start_step %int0_1737, %int100_1738, %int2_1739, %none_1740, %none_1741, %cpu_1742, %false_1743 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1744 = torch.constant.int 0 | |
%int0_1745 = torch.constant.int 0 | |
%int50_1746 = torch.constant.int 50 | |
%int1_1747 = torch.constant.int 1 | |
%1706 = torch.aten.slice.Tensor %1705, %int0_1744, %int0_1745, %int50_1746, %int1_1747 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1748 = torch.constant.int 6 | |
%1707 = torch.prims.convert_element_type %1706, %int6_1748 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1749 = torch.constant.int 100 | |
%1708 = torch.aten.div.Scalar %1707, %int100_1749 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1750 = torch.constant.float 1.000000e+04 | |
%1709 = torch.aten.pow.Scalar %float1.000000e04_1750, %1708 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1710 = torch.aten.reciprocal %1709 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1751 = torch.constant.float 1.000000e+00 | |
%1711 = torch.aten.mul.Scalar %1710, %float1.000000e00_1751 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1752 = torch.constant.int 2048 | |
%int1_1753 = torch.constant.int 1 | |
%1712 = torch.prim.ListConstruct %int2048_1752, %int1_1753 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1713 = torch.aten.view %1704, %1712 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1714 = torch.aten.mul.Tensor %1713, %1711 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1715 = torch.aten.cos %1714 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1716 = torch.aten.sin %1714 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1717 = torch.aten.complex %1715, %1716 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1754 = torch.constant.int 1 | |
%1718 = torch.aten.size.int %1662, %int1_1754 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1755 = torch.constant.int 0 | |
%1719 = torch.aten.add.int %int0_1755, %1718 : !torch.int, !torch.int -> !torch.int | |
%int0_1756 = torch.constant.int 0 | |
%int0_1757 = torch.constant.int 0 | |
%int1_1758 = torch.constant.int 1 | |
%1720 = torch.aten.slice.Tensor %1717, %int0_1756, %int0_1757, %1719, %int1_1758 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1720, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1759 = torch.constant.int 1 | |
%int0_1760 = torch.constant.int 0 | |
%int9223372036854775807_1761 = torch.constant.int 9223372036854775807 | |
%int1_1762 = torch.constant.int 1 | |
%1721 = torch.aten.slice.Tensor %1720, %int1_1759, %int0_1760, %int9223372036854775807_1761, %int1_1762 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1721, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1763 = torch.constant.int 0 | |
%1722 = torch.aten.unsqueeze %1721, %int0_1763 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1722, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1764 = torch.constant.int 2 | |
%1723 = torch.aten.unsqueeze %1722, %int2_1764 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1723, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1765 = torch.constant.int 3 | |
%int0_1766 = torch.constant.int 0 | |
%int9223372036854775807_1767 = torch.constant.int 9223372036854775807 | |
%int1_1768 = torch.constant.int 1 | |
%1724 = torch.aten.slice.Tensor %1723, %int3_1765, %int0_1766, %int9223372036854775807_1767, %int1_1768 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1724, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
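// Apply RoPE to the key projection: bitcast adjacent f16 pairs to complex<f16>, rotate by an elementwise complex multiply, bitcast back to [1, seq, 32, 100], and truncate to f16.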
%1725 = torch_c.to_builtin_tensor %1672 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_1769 = arith.constant 1 : index | |
%dim_1770 = tensor.dim %1725, %c1_1769 : tensor<1x?x32x100xf16> | |
%1726 = flow.tensor.bitcast %1725 : tensor<1x?x32x100xf16>{%dim_1770} -> tensor<1x?x32x50xcomplex<f16>>{%dim_1770} | |
%1727 = torch_c.from_builtin_tensor %1726 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1727, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1728 = torch.aten.mul.Tensor %1727, %1724 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1728, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1729 = torch_c.to_builtin_tensor %1728 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_1771 = arith.constant 1 : index | |
%dim_1772 = tensor.dim %1729, %c1_1771 : tensor<1x?x32x50xcomplex<f32>> | |
%1730 = flow.tensor.bitcast %1729 : tensor<1x?x32x50xcomplex<f32>>{%dim_1772} -> tensor<1x?x32x100xf32>{%dim_1772} | |
%1731 = torch_c.from_builtin_tensor %1730 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1731, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_1773 = torch.constant.int 5 | |
%1732 = torch.prims.convert_element_type %1731, %int5_1773 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1732, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
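// Compute flat slot indices into the paged KV cache: page_id * 52 + 14, where 52 appears to be 26 layers x 2 (K and V) entries per page; the +1 further below selects the matching value slot.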
%int52_1774 = torch.constant.int 52 | |
%1733 = torch.aten.mul.Scalar %arg2, %int52_1774 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1733, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int14 = torch.constant.int 14 | |
%int1_1775 = torch.constant.int 1 | |
%1734 = torch.aten.add.Scalar %1733, %int14, %int1_1775 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1734, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
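// Reshape the rotated keys and the values into 16-token cache pages ([pages, 16, 32, 100]) and flatten the per-page slot indices to match.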
%int1_1776 = torch.constant.int 1 | |
%int16_1777 = torch.constant.int 16 | |
%int32_1778 = torch.constant.int 32 | |
%int100_1779 = torch.constant.int 100 | |
%1735 = torch.prim.ListConstruct %int1_1776, %368, %int16_1777, %int32_1778, %int100_1779 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1736 = torch.aten.view %1732, %1735 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1736, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_1780 = torch.constant.int 16 | |
%int32_1781 = torch.constant.int 32 | |
%int100_1782 = torch.constant.int 100 | |
%1737 = torch.prim.ListConstruct %368, %int16_1780, %int32_1781, %int100_1782 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1738 = torch.aten.view %1736, %1737 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1738, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1739 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1740 = torch.aten.view %1734, %1739 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1740, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_1783 = torch.constant.int 1 | |
%int16_1784 = torch.constant.int 16 | |
%int32_1785 = torch.constant.int 32 | |
%int100_1786 = torch.constant.int 100 | |
%1741 = torch.prim.ListConstruct %int1_1783, %368, %int16_1784, %int32_1785, %int100_1786 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1742 = torch.aten.view %1674, %1741 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1742, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_1787 = torch.constant.int 16 | |
%int32_1788 = torch.constant.int 32 | |
%int100_1789 = torch.constant.int 100 | |
%1743 = torch.prim.ListConstruct %368, %int16_1787, %int32_1788, %int100_1789 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1744 = torch.aten.view %1742, %1743 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1744, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_1790 = torch.constant.int 1 | |
%int1_1791 = torch.constant.int 1 | |
%1745 = torch.aten.add.Scalar %1734, %int1_1790, %int1_1791 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1745, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%1746 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1747 = torch.aten.view %1745, %1746 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1747, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%1748 = torch.prim.ListConstruct %1740, %1747 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_1792 = torch.constant.int 0 | |
%1749 = torch.aten.cat %1748, %int0_1792 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1749, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%1750 = torch.prim.ListConstruct %1738, %1744 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_1793 = torch.constant.int 0 | |
%1751 = torch.aten.cat %1750, %int0_1793 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1751, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
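// Cache write: view the flat [?, 2662400] cache as [?, 26, 2, 16, 32, 100], scatter the concatenated K/V pages with index_put at the computed slots, then view back to the flat layout.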
%int26_1794 = torch.constant.int 26 | |
%int2_1795 = torch.constant.int 2 | |
%int16_1796 = torch.constant.int 16 | |
%int32_1797 = torch.constant.int 32 | |
%int100_1798 = torch.constant.int 100 | |
%1752 = torch.prim.ListConstruct %359, %int26_1794, %int2_1795, %int16_1796, %int32_1797, %int100_1798 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1753 = torch.aten.view %1567, %1752 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1753, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_1799 = torch.constant.int 26 | |
%1754 = torch.aten.mul.int %359, %int26_1799 : !torch.int, !torch.int -> !torch.int | |
%int2_1800 = torch.constant.int 2 | |
%1755 = torch.aten.mul.int %1754, %int2_1800 : !torch.int, !torch.int -> !torch.int | |
%int16_1801 = torch.constant.int 16 | |
%int32_1802 = torch.constant.int 32 | |
%int100_1803 = torch.constant.int 100 | |
%1756 = torch.prim.ListConstruct %1755, %int16_1801, %int32_1802, %int100_1803 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1757 = torch.aten.view %1753, %1756 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1757, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1758 = torch.prim.ListConstruct %1749 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_1804 = torch.constant.bool false | |
%1759 = torch.aten.index_put %1757, %1758, %1751, %false_1804 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1759, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_1805 = torch.constant.int 26 | |
%int2_1806 = torch.constant.int 2 | |
%int16_1807 = torch.constant.int 16 | |
%int32_1808 = torch.constant.int 32 | |
%int100_1809 = torch.constant.int 100 | |
%1760 = torch.prim.ListConstruct %359, %int26_1805, %int2_1806, %int16_1807, %int32_1808, %int100_1809 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1761 = torch.aten.view %1759, %1760 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1761, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_1810 = torch.constant.int 2662400 | |
%1762 = torch.prim.ListConstruct %359, %int2662400_1810 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1763 = torch.aten.view %1761, %1762 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %1763, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
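// Attention: move heads to the batch dimension ([1, 32, seq, 100]) for Q, K and V, transpose K to [1, 32, 100, seq], and compute the Q @ K^T scores with a batched matmul.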
%int1_1811 = torch.constant.int 1 | |
%int2_1812 = torch.constant.int 2 | |
%1764 = torch.aten.transpose.int %1703, %int1_1811, %int2_1812 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1764, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1813 = torch.constant.int 1 | |
%int2_1814 = torch.constant.int 2 | |
%1765 = torch.aten.transpose.int %1732, %int1_1813, %int2_1814 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1765, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1815 = torch.constant.int 1 | |
%int2_1816 = torch.constant.int 2 | |
%1766 = torch.aten.transpose.int %1674, %int1_1815, %int2_1816 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1766, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_1817 = torch.constant.int 2 | |
%int3_1818 = torch.constant.int 3 | |
%1767 = torch.aten.transpose.int %1765, %int2_1817, %int3_1818 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1767, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_1819 = torch.constant.int 1 | |
%int32_1820 = torch.constant.int 32 | |
%int100_1821 = torch.constant.int 100 | |
%1768 = torch.prim.ListConstruct %int1_1819, %int32_1820, %1689, %int100_1821 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1822 = torch.constant.bool false | |
%1769 = torch.aten.expand %1764, %1768, %false_1822 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1769, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_1823 = torch.constant.int 32 | |
%int100_1824 = torch.constant.int 100 | |
%1770 = torch.prim.ListConstruct %int32_1823, %1689, %int100_1824 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1771 = torch.aten.view %1769, %1770 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1771, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_1825 = torch.constant.int 1 | |
%int32_1826 = torch.constant.int 32 | |
%int100_1827 = torch.constant.int 100 | |
%1772 = torch.prim.ListConstruct %int1_1825, %int32_1826, %int100_1827, %1718 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1828 = torch.constant.bool false | |
%1773 = torch.aten.expand %1767, %1772, %false_1828 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1773, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_1829 = torch.constant.int 32 | |
%int100_1830 = torch.constant.int 100 | |
%1774 = torch.prim.ListConstruct %int32_1829, %int100_1830, %1718 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1775 = torch.aten.view %1773, %1774 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %1775, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%1776 = torch.aten.bmm %1771, %1775 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1776, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_1831 = torch.constant.int 1 | |
%int32_1832 = torch.constant.int 32 | |
%1777 = torch.prim.ListConstruct %int1_1831, %int32_1832, %1689, %1718 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1778 = torch.aten.view %1776, %1777 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1778, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
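// Scale the scores by 1/sqrt(head_dim) — head_dim is 100, hence the divide by 10.0 — then add the attention mask and take the softmax in f32 before truncating back to f16.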
%float1.000000e01_1833 = torch.constant.float 1.000000e+01 | |
%1779 = torch.aten.div.Scalar %1778, %float1.000000e01_1833 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1779, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_1834 = torch.constant.int 1 | |
%1780 = torch.aten.add.Tensor %1779, %266, %int1_1834 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1780, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_1835 = torch.constant.int 6 | |
%1781 = torch.prims.convert_element_type %1780, %int6_1835 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1781, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_1836 = torch.constant.int -1 | |
%false_1837 = torch.constant.bool false | |
%1782 = torch.aten._softmax %1781, %int-1_1836, %false_1837 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1782, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_1838 = torch.constant.int 5 | |
%1783 = torch.prims.convert_element_type %1782, %int5_1838 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1783, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
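// Weight the values: (softmax scores) @ V, then restore the [1, seq, 32, 100] layout and flatten the heads back into a [1, seq, 3200] activation.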
%int1_1839 = torch.constant.int 1 | |
%int32_1840 = torch.constant.int 32 | |
%1784 = torch.prim.ListConstruct %int1_1839, %int32_1840, %1689, %1718 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1841 = torch.constant.bool false | |
%1785 = torch.aten.expand %1783, %1784, %false_1841 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1785, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_1842 = torch.constant.int 32 | |
%1786 = torch.prim.ListConstruct %int32_1842, %1689, %1718 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1787 = torch.aten.view %1785, %1786 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1787, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_1843 = torch.constant.int 1 | |
%1788 = torch.aten.size.int %1668, %int1_1843 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_1844 = torch.constant.int 1 | |
%int32_1845 = torch.constant.int 32 | |
%int100_1846 = torch.constant.int 100 | |
%1789 = torch.prim.ListConstruct %int1_1844, %int32_1845, %1788, %int100_1846 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_1847 = torch.constant.bool false | |
%1790 = torch.aten.expand %1766, %1789, %false_1847 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1790, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_1848 = torch.constant.int 32 | |
%int100_1849 = torch.constant.int 100 | |
%1791 = torch.prim.ListConstruct %int32_1848, %1788, %int100_1849 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1792 = torch.aten.view %1790, %1791 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1792, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%1793 = torch.aten.bmm %1787, %1792 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1793, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_1850 = torch.constant.int 1 | |
%int32_1851 = torch.constant.int 32 | |
%int100_1852 = torch.constant.int 100 | |
%1794 = torch.prim.ListConstruct %int1_1850, %int32_1851, %1689, %int100_1852 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1795 = torch.aten.view %1793, %1794 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1795, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_1853 = torch.constant.int 1 | |
%int2_1854 = torch.constant.int 2 | |
%1796 = torch.aten.transpose.int %1795, %int1_1853, %int2_1854 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1796, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_1855 = torch.constant.int 0 | |
%1797 = torch.aten.clone %1796, %int0_1855 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1797, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1856 = torch.constant.int 1 | |
%int3200_1857 = torch.constant.int 3200 | |
%1798 = torch.prim.ListConstruct %int1_1856, %1689, %int3200_1857 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1799 = torch.aten._unsafe_view %1797, %1798 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1799, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
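// Attention output projection (x @ W_o^T) followed by the residual add.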
%int-2_1858 = torch.constant.int -2 | |
%int-1_1859 = torch.constant.int -1 | |
%1800 = torch.aten.transpose.int %68, %int-2_1858, %int-1_1859 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1860 = torch.constant.int 3200 | |
%1801 = torch.prim.ListConstruct %1689, %int3200_1860 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1802 = torch.aten.view %1799, %1801 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1802, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1803 = torch.aten.mm %1802, %1800 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1803, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1861 = torch.constant.int 1 | |
%int3200_1862 = torch.constant.int 3200 | |
%1804 = torch.prim.ListConstruct %int1_1861, %1689, %int3200_1862 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1805 = torch.aten.view %1803, %1804 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1805, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1863 = torch.constant.int 1 | |
%1806 = torch.aten.add.Tensor %1641, %1805, %int1_1863 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1806, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
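// RMSNorm in f32: x * rsqrt(mean(x^2, dim=-1) + ~1e-6), scaled by the norm weight and truncated back to f16.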
%int6_1864 = torch.constant.int 6 | |
%1807 = torch.prims.convert_element_type %1806, %int6_1864 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1807, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_1865 = torch.constant.int 2 | |
%1808 = torch.aten.pow.Tensor_Scalar %1807, %int2_1865 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1808, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_1866 = torch.constant.int -1 | |
%1809 = torch.prim.ListConstruct %int-1_1866 : (!torch.int) -> !torch.list<int> | |
%true_1867 = torch.constant.bool true | |
%none_1868 = torch.constant.none | |
%1810 = torch.aten.mean.dim %1808, %1809, %true_1867, %none_1868 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1810, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_1869 = torch.constant.float 9.9999999747524271E-7 | |
%int1_1870 = torch.constant.int 1 | |
%1811 = torch.aten.add.Scalar %1810, %float9.999990e-07_1869, %int1_1870 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1811, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1812 = torch.aten.rsqrt %1811 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1812, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1813 = torch.aten.mul.Tensor %1807, %1812 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1813, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1814 = torch.aten.mul.Tensor %69, %1813 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1814, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_1871 = torch.constant.int 5 | |
%1815 = torch.prims.convert_element_type %1814, %int5_1871 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1815, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
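// SwiGLU feed-forward: silu(x @ W_gate^T) * (x @ W_up^T) in the 8640-wide hidden space, then the down projection back to 3200 and the residual add.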
%int-2_1872 = torch.constant.int -2 | |
%int-1_1873 = torch.constant.int -1 | |
%1816 = torch.aten.transpose.int %70, %int-2_1872, %int-1_1873 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_1874 = torch.constant.int 3200 | |
%1817 = torch.prim.ListConstruct %240, %int3200_1874 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1818 = torch.aten.view %1815, %1817 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1818, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1819 = torch.aten.mm %1818, %1816 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1819, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_1875 = torch.constant.int 1 | |
%int8640_1876 = torch.constant.int 8640 | |
%1820 = torch.prim.ListConstruct %int1_1875, %240, %int8640_1876 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1821 = torch.aten.view %1819, %1820 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1821, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1822 = torch.aten.silu %1821 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1822, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_1877 = torch.constant.int -2 | |
%int-1_1878 = torch.constant.int -1 | |
%1823 = torch.aten.transpose.int %71, %int-2_1877, %int-1_1878 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_1879 = torch.constant.int 3200 | |
%1824 = torch.prim.ListConstruct %240, %int3200_1879 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1825 = torch.aten.view %1815, %1824 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1825, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1826 = torch.aten.mm %1825, %1823 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1826, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_1880 = torch.constant.int 1 | |
%int8640_1881 = torch.constant.int 8640 | |
%1827 = torch.prim.ListConstruct %int1_1880, %240, %int8640_1881 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1828 = torch.aten.view %1826, %1827 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1828, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%1829 = torch.aten.mul.Tensor %1822, %1828 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %1829, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_1882 = torch.constant.int -2 | |
%int-1_1883 = torch.constant.int -1 | |
%1830 = torch.aten.transpose.int %72, %int-2_1882, %int-1_1883 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_1884 = torch.constant.int 1 | |
%1831 = torch.aten.size.int %1821, %int1_1884 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_1885 = torch.constant.int 8640 | |
%1832 = torch.prim.ListConstruct %1831, %int8640_1885 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1833 = torch.aten.view %1829, %1832 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %1833, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%1834 = torch.aten.mm %1833, %1830 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1834, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1886 = torch.constant.int 1 | |
%int3200_1887 = torch.constant.int 3200 | |
%1835 = torch.prim.ListConstruct %int1_1886, %1831, %int3200_1887 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1836 = torch.aten.view %1834, %1835 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1836, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1888 = torch.constant.int 1 | |
%1837 = torch.aten.add.Tensor %1806, %1836, %int1_1888 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1837, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
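// Next transformer block: RMSNorm of the residual stream, then the Q, K and V projections, each reshaped from [1, seq, 3200] to [1, seq, 32, 100].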
%int6_1889 = torch.constant.int 6 | |
%1838 = torch.prims.convert_element_type %1837, %int6_1889 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1838, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_1890 = torch.constant.int 2 | |
%1839 = torch.aten.pow.Tensor_Scalar %1838, %int2_1890 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1839, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_1891 = torch.constant.int -1 | |
%1840 = torch.prim.ListConstruct %int-1_1891 : (!torch.int) -> !torch.list<int> | |
%true_1892 = torch.constant.bool true | |
%none_1893 = torch.constant.none | |
%1841 = torch.aten.mean.dim %1839, %1840, %true_1892, %none_1893 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1841, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_1894 = torch.constant.float 9.9999999747524271E-7 | |
%int1_1895 = torch.constant.int 1 | |
%1842 = torch.aten.add.Scalar %1841, %float9.999990e-07_1894, %int1_1895 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1842, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1843 = torch.aten.rsqrt %1842 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %1843, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%1844 = torch.aten.mul.Tensor %1838, %1843 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1844, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%1845 = torch.aten.mul.Tensor %73, %1844 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %1845, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_1896 = torch.constant.int 5 | |
%1846 = torch.prims.convert_element_type %1845, %int5_1896 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1846, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1897 = torch.constant.int -2 | |
%int-1_1898 = torch.constant.int -1 | |
%1847 = torch.aten.transpose.int %74, %int-2_1897, %int-1_1898 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1899 = torch.constant.int 3200 | |
%1848 = torch.prim.ListConstruct %240, %int3200_1899 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1849 = torch.aten.view %1846, %1848 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1849, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1850 = torch.aten.mm %1849, %1847 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1850, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1900 = torch.constant.int 1 | |
%int3200_1901 = torch.constant.int 3200 | |
%1851 = torch.prim.ListConstruct %int1_1900, %240, %int3200_1901 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1852 = torch.aten.view %1850, %1851 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1852, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1902 = torch.constant.int -2 | |
%int-1_1903 = torch.constant.int -1 | |
%1853 = torch.aten.transpose.int %75, %int-2_1902, %int-1_1903 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1904 = torch.constant.int 3200 | |
%1854 = torch.prim.ListConstruct %240, %int3200_1904 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1855 = torch.aten.view %1846, %1854 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1855, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1856 = torch.aten.mm %1855, %1853 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1856, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1905 = torch.constant.int 1 | |
%int3200_1906 = torch.constant.int 3200 | |
%1857 = torch.prim.ListConstruct %int1_1905, %240, %int3200_1906 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1858 = torch.aten.view %1856, %1857 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1858, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_1907 = torch.constant.int -2 | |
%int-1_1908 = torch.constant.int -1 | |
%1859 = torch.aten.transpose.int %76, %int-2_1907, %int-1_1908 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_1909 = torch.constant.int 3200 | |
%1860 = torch.prim.ListConstruct %240, %int3200_1909 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1861 = torch.aten.view %1846, %1860 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1861, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1862 = torch.aten.mm %1861, %1859 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1862, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_1910 = torch.constant.int 1 | |
%int3200_1911 = torch.constant.int 3200 | |
%1863 = torch.prim.ListConstruct %int1_1910, %240, %int3200_1911 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1864 = torch.aten.view %1862, %1863 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1864, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_1912 = torch.constant.int 1 | |
%int32_1913 = torch.constant.int 32 | |
%int100_1914 = torch.constant.int 100 | |
%1865 = torch.prim.ListConstruct %int1_1912, %240, %int32_1913, %int100_1914 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1866 = torch.aten.view %1852, %1865 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1866, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1915 = torch.constant.int 1 | |
%int32_1916 = torch.constant.int 32 | |
%int100_1917 = torch.constant.int 100 | |
%1867 = torch.prim.ListConstruct %int1_1915, %240, %int32_1916, %int100_1917 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1868 = torch.aten.view %1858, %1867 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1868, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_1918 = torch.constant.int 1 | |
%int32_1919 = torch.constant.int 32 | |
%int100_1920 = torch.constant.int 100 | |
%1869 = torch.prim.ListConstruct %int1_1918, %240, %int32_1919, %int100_1920 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1870 = torch.aten.view %1864, %1869 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1870, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
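// Rebuild the RoPE rotation table from scratch (inverse frequencies 1/10000^(2i/100) over 2048 positions; the subgraph is emitted verbatim for each use rather than hoisted) and apply it to the query projection.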
%int2048_1921 = torch.constant.int 2048 | |
%none_1922 = torch.constant.none | |
%none_1923 = torch.constant.none | |
%cpu_1924 = torch.constant.device "cpu" | |
%false_1925 = torch.constant.bool false | |
%1871 = torch.aten.arange %int2048_1921, %none_1922, %none_1923, %cpu_1924, %false_1925 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1926 = torch.constant.int 0 | |
%int100_1927 = torch.constant.int 100 | |
%int2_1928 = torch.constant.int 2 | |
%none_1929 = torch.constant.none | |
%none_1930 = torch.constant.none | |
%cpu_1931 = torch.constant.device "cpu" | |
%false_1932 = torch.constant.bool false | |
%1872 = torch.aten.arange.start_step %int0_1926, %int100_1927, %int2_1928, %none_1929, %none_1930, %cpu_1931, %false_1932 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1933 = torch.constant.int 0 | |
%int0_1934 = torch.constant.int 0 | |
%int50_1935 = torch.constant.int 50 | |
%int1_1936 = torch.constant.int 1 | |
%1873 = torch.aten.slice.Tensor %1872, %int0_1933, %int0_1934, %int50_1935, %int1_1936 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1937 = torch.constant.int 6 | |
%1874 = torch.prims.convert_element_type %1873, %int6_1937 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1938 = torch.constant.int 100 | |
%1875 = torch.aten.div.Scalar %1874, %int100_1938 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1939 = torch.constant.float 1.000000e+04 | |
%1876 = torch.aten.pow.Scalar %float1.000000e04_1939, %1875 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1877 = torch.aten.reciprocal %1876 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1940 = torch.constant.float 1.000000e+00 | |
%1878 = torch.aten.mul.Scalar %1877, %float1.000000e00_1940 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1941 = torch.constant.int 2048 | |
%int1_1942 = torch.constant.int 1 | |
%1879 = torch.prim.ListConstruct %int2048_1941, %int1_1942 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1880 = torch.aten.view %1871, %1879 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1881 = torch.aten.mul.Tensor %1880, %1878 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1882 = torch.aten.cos %1881 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1883 = torch.aten.sin %1881 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1884 = torch.aten.complex %1882, %1883 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1943 = torch.constant.int 1 | |
%1885 = torch.aten.size.int %1852, %int1_1943 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1944 = torch.constant.int 0 | |
%1886 = torch.aten.add.int %int0_1944, %1885 : !torch.int, !torch.int -> !torch.int | |
%int0_1945 = torch.constant.int 0 | |
%int0_1946 = torch.constant.int 0 | |
%int1_1947 = torch.constant.int 1 | |
%1887 = torch.aten.slice.Tensor %1884, %int0_1945, %int0_1946, %1886, %int1_1947 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1887, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1948 = torch.constant.int 1 | |
%int0_1949 = torch.constant.int 0 | |
%int9223372036854775807_1950 = torch.constant.int 9223372036854775807 | |
%int1_1951 = torch.constant.int 1 | |
%1888 = torch.aten.slice.Tensor %1887, %int1_1948, %int0_1949, %int9223372036854775807_1950, %int1_1951 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1888, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1952 = torch.constant.int 0 | |
%1889 = torch.aten.unsqueeze %1888, %int0_1952 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1889, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1953 = torch.constant.int 2 | |
%1890 = torch.aten.unsqueeze %1889, %int2_1953 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1890, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1954 = torch.constant.int 3 | |
%int0_1955 = torch.constant.int 0 | |
%int9223372036854775807_1956 = torch.constant.int 9223372036854775807 | |
%int1_1957 = torch.constant.int 1 | |
%1891 = torch.aten.slice.Tensor %1890, %int3_1954, %int0_1955, %int9223372036854775807_1956, %int1_1957 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1891, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%1892 = torch_c.to_builtin_tensor %1866 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_1958 = arith.constant 1 : index | |
%dim_1959 = tensor.dim %1892, %c1_1958 : tensor<1x?x32x100xf16> | |
%1893 = flow.tensor.bitcast %1892 : tensor<1x?x32x100xf16>{%dim_1959} -> tensor<1x?x32x50xcomplex<f16>>{%dim_1959} | |
%1894 = torch_c.from_builtin_tensor %1893 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1894, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1895 = torch.aten.mul.Tensor %1894, %1891 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1895, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1896 = torch_c.to_builtin_tensor %1895 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_1960 = arith.constant 1 : index | |
%dim_1961 = tensor.dim %1896, %c1_1960 : tensor<1x?x32x50xcomplex<f32>> | |
%1897 = flow.tensor.bitcast %1896 : tensor<1x?x32x50xcomplex<f32>>{%dim_1961} -> tensor<1x?x32x100xf32>{%dim_1961} | |
%1898 = torch_c.from_builtin_tensor %1897 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1898, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_1962 = torch.constant.int 5 | |
%1899 = torch.prims.convert_element_type %1898, %int5_1962 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1899, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
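// The same rotation table is recomputed once more and applied to the key projection.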
%int2048_1963 = torch.constant.int 2048 | |
%none_1964 = torch.constant.none | |
%none_1965 = torch.constant.none | |
%cpu_1966 = torch.constant.device "cpu" | |
%false_1967 = torch.constant.bool false | |
%1900 = torch.aten.arange %int2048_1963, %none_1964, %none_1965, %cpu_1966, %false_1967 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_1968 = torch.constant.int 0 | |
%int100_1969 = torch.constant.int 100 | |
%int2_1970 = torch.constant.int 2 | |
%none_1971 = torch.constant.none | |
%none_1972 = torch.constant.none | |
%cpu_1973 = torch.constant.device "cpu" | |
%false_1974 = torch.constant.bool false | |
%1901 = torch.aten.arange.start_step %int0_1968, %int100_1969, %int2_1970, %none_1971, %none_1972, %cpu_1973, %false_1974 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_1975 = torch.constant.int 0 | |
%int0_1976 = torch.constant.int 0 | |
%int50_1977 = torch.constant.int 50 | |
%int1_1978 = torch.constant.int 1 | |
%1902 = torch.aten.slice.Tensor %1901, %int0_1975, %int0_1976, %int50_1977, %int1_1978 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_1979 = torch.constant.int 6 | |
%1903 = torch.prims.convert_element_type %1902, %int6_1979 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_1980 = torch.constant.int 100 | |
%1904 = torch.aten.div.Scalar %1903, %int100_1980 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_1981 = torch.constant.float 1.000000e+04 | |
%1905 = torch.aten.pow.Scalar %float1.000000e04_1981, %1904 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%1906 = torch.aten.reciprocal %1905 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_1982 = torch.constant.float 1.000000e+00 | |
%1907 = torch.aten.mul.Scalar %1906, %float1.000000e00_1982 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_1983 = torch.constant.int 2048 | |
%int1_1984 = torch.constant.int 1 | |
%1908 = torch.prim.ListConstruct %int2048_1983, %int1_1984 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1909 = torch.aten.view %1900, %1908 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%1910 = torch.aten.mul.Tensor %1909, %1907 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1911 = torch.aten.cos %1910 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1912 = torch.aten.sin %1910 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%1913 = torch.aten.complex %1911, %1912 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_1985 = torch.constant.int 1 | |
%1914 = torch.aten.size.int %1858, %int1_1985 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_1986 = torch.constant.int 0 | |
%1915 = torch.aten.add.int %int0_1986, %1914 : !torch.int, !torch.int -> !torch.int | |
%int0_1987 = torch.constant.int 0 | |
%int0_1988 = torch.constant.int 0 | |
%int1_1989 = torch.constant.int 1 | |
%1916 = torch.aten.slice.Tensor %1913, %int0_1987, %int0_1988, %1915, %int1_1989 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1916, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_1990 = torch.constant.int 1 | |
%int0_1991 = torch.constant.int 0 | |
%int9223372036854775807_1992 = torch.constant.int 9223372036854775807 | |
%int1_1993 = torch.constant.int 1 | |
%1917 = torch.aten.slice.Tensor %1916, %int1_1990, %int0_1991, %int9223372036854775807_1992, %int1_1993 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %1917, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_1994 = torch.constant.int 0 | |
%1918 = torch.aten.unsqueeze %1917, %int0_1994 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %1918, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_1995 = torch.constant.int 2 | |
%1919 = torch.aten.unsqueeze %1918, %int2_1995 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1919, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_1996 = torch.constant.int 3 | |
%int0_1997 = torch.constant.int 0 | |
%int9223372036854775807_1998 = torch.constant.int 9223372036854775807 | |
%int1_1999 = torch.constant.int 1 | |
%1920 = torch.aten.slice.Tensor %1919, %int3_1996, %int0_1997, %int9223372036854775807_1998, %int1_1999 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %1920, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%1921 = torch_c.to_builtin_tensor %1868 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_2000 = arith.constant 1 : index | |
%dim_2001 = tensor.dim %1921, %c1_2000 : tensor<1x?x32x100xf16> | |
%1922 = flow.tensor.bitcast %1921 : tensor<1x?x32x100xf16>{%dim_2001} -> tensor<1x?x32x50xcomplex<f16>>{%dim_2001} | |
%1923 = torch_c.from_builtin_tensor %1922 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %1923, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%1924 = torch.aten.mul.Tensor %1923, %1920 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %1924, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%1925 = torch_c.to_builtin_tensor %1924 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_2002 = arith.constant 1 : index | |
%dim_2003 = tensor.dim %1925, %c1_2002 : tensor<1x?x32x50xcomplex<f32>> | |
%1926 = flow.tensor.bitcast %1925 : tensor<1x?x32x50xcomplex<f32>>{%dim_2003} -> tensor<1x?x32x100xf32>{%dim_2003} | |
%1927 = torch_c.from_builtin_tensor %1926 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %1927, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_2004 = torch.constant.int 5 | |
%1928 = torch.prims.convert_element_type %1927, %int5_2004 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1928, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
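// KV-cache update for this block: slot indices are page_id * 52 + 16 here (apparently the next layer's K entries, with +1 again selecting V), followed by the same page reshape, index_put and flatten sequence.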
%int52_2005 = torch.constant.int 52 | |
%1929 = torch.aten.mul.Scalar %arg2, %int52_2005 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1929, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int16_2006 = torch.constant.int 16 | |
%int1_2007 = torch.constant.int 1 | |
%1930 = torch.aten.add.Scalar %1929, %int16_2006, %int1_2007 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1930, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_2008 = torch.constant.int 1 | |
%int16_2009 = torch.constant.int 16 | |
%int32_2010 = torch.constant.int 32 | |
%int100_2011 = torch.constant.int 100 | |
%1931 = torch.prim.ListConstruct %int1_2008, %368, %int16_2009, %int32_2010, %int100_2011 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1932 = torch.aten.view %1928, %1931 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1932, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_2012 = torch.constant.int 16 | |
%int32_2013 = torch.constant.int 32 | |
%int100_2014 = torch.constant.int 100 | |
%1933 = torch.prim.ListConstruct %368, %int16_2012, %int32_2013, %int100_2014 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1934 = torch.aten.view %1932, %1933 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1934, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1935 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1936 = torch.aten.view %1930, %1935 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1936, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_2015 = torch.constant.int 1 | |
%int16_2016 = torch.constant.int 16 | |
%int32_2017 = torch.constant.int 32 | |
%int100_2018 = torch.constant.int 100 | |
%1937 = torch.prim.ListConstruct %int1_2015, %368, %int16_2016, %int32_2017, %int100_2018 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1938 = torch.aten.view %1870, %1937 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %1938, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_2019 = torch.constant.int 16 | |
%int32_2020 = torch.constant.int 32 | |
%int100_2021 = torch.constant.int 100 | |
%1939 = torch.prim.ListConstruct %368, %int16_2019, %int32_2020, %int100_2021 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1940 = torch.aten.view %1938, %1939 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1940, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_2022 = torch.constant.int 1 | |
%int1_2023 = torch.constant.int 1 | |
%1941 = torch.aten.add.Scalar %1930, %int1_2022, %int1_2023 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %1941, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%1942 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%1943 = torch.aten.view %1941, %1942 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1943, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%1944 = torch.prim.ListConstruct %1936, %1943 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_2024 = torch.constant.int 0 | |
%1945 = torch.aten.cat %1944, %int0_2024 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %1945, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%1946 = torch.prim.ListConstruct %1934, %1940 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_2025 = torch.constant.int 0 | |
%1947 = torch.aten.cat %1946, %int0_2025 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1947, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_2026 = torch.constant.int 26 | |
%int2_2027 = torch.constant.int 2 | |
%int16_2028 = torch.constant.int 16 | |
%int32_2029 = torch.constant.int 32 | |
%int100_2030 = torch.constant.int 100 | |
%1948 = torch.prim.ListConstruct %359, %int26_2026, %int2_2027, %int16_2028, %int32_2029, %int100_2030 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1949 = torch.aten.view %1763, %1948 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1949, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_2031 = torch.constant.int 26 | |
%1950 = torch.aten.mul.int %359, %int26_2031 : !torch.int, !torch.int -> !torch.int | |
%int2_2032 = torch.constant.int 2 | |
%1951 = torch.aten.mul.int %1950, %int2_2032 : !torch.int, !torch.int -> !torch.int | |
%int16_2033 = torch.constant.int 16 | |
%int32_2034 = torch.constant.int 32 | |
%int100_2035 = torch.constant.int 100 | |
%1952 = torch.prim.ListConstruct %1951, %int16_2033, %int32_2034, %int100_2035 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1953 = torch.aten.view %1949, %1952 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1953, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%1954 = torch.prim.ListConstruct %1945 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_2036 = torch.constant.bool false | |
%1955 = torch.aten.index_put %1953, %1954, %1947, %false_2036 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %1955, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_2037 = torch.constant.int 26 | |
%int2_2038 = torch.constant.int 2 | |
%int16_2039 = torch.constant.int 16 | |
%int32_2040 = torch.constant.int 32 | |
%int100_2041 = torch.constant.int 100 | |
%1956 = torch.prim.ListConstruct %359, %int26_2037, %int2_2038, %int16_2039, %int32_2040, %int100_2041 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1957 = torch.aten.view %1955, %1956 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %1957, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_2042 = torch.constant.int 2662400 | |
%1958 = torch.prim.ListConstruct %359, %int2662400_2042 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1959 = torch.aten.view %1957, %1958 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %1959, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
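// Attention for this layer starts here: Q, K and V (%1899, %1928, %1870) are
// transposed from [1, seq, 32, 100] to [1, 32, seq, 100], and K is transposed
// once more to [1, 32, 100, seq] for the Q @ K^T matmul.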
%int1_2043 = torch.constant.int 1 | |
%int2_2044 = torch.constant.int 2 | |
%1960 = torch.aten.transpose.int %1899, %int1_2043, %int2_2044 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1960, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2045 = torch.constant.int 1 | |
%int2_2046 = torch.constant.int 2 | |
%1961 = torch.aten.transpose.int %1928, %int1_2045, %int2_2046 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1961, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2047 = torch.constant.int 1 | |
%int2_2048 = torch.constant.int 2 | |
%1962 = torch.aten.transpose.int %1870, %int1_2047, %int2_2048 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1962, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_2049 = torch.constant.int 2 | |
%int3_2050 = torch.constant.int 3 | |
%1963 = torch.aten.transpose.int %1961, %int2_2049, %int3_2050 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1963, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
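// The expand + view pairs below just collapse the leading batch-1 dimension so
// the score computation can run as a single batched matmul:
// [32, seq, 100] @ [32, 100, seq] -> [32, seq, seq].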
%int1_2051 = torch.constant.int 1 | |
%int32_2052 = torch.constant.int 32 | |
%int100_2053 = torch.constant.int 100 | |
%1964 = torch.prim.ListConstruct %int1_2051, %int32_2052, %1885, %int100_2053 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2054 = torch.constant.bool false | |
%1965 = torch.aten.expand %1960, %1964, %false_2054 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1965, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_2055 = torch.constant.int 32 | |
%int100_2056 = torch.constant.int 100 | |
%1966 = torch.prim.ListConstruct %int32_2055, %1885, %int100_2056 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1967 = torch.aten.view %1965, %1966 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1967, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_2057 = torch.constant.int 1 | |
%int32_2058 = torch.constant.int 32 | |
%int100_2059 = torch.constant.int 100 | |
%1968 = torch.prim.ListConstruct %int1_2057, %int32_2058, %int100_2059, %1914 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2060 = torch.constant.bool false | |
%1969 = torch.aten.expand %1963, %1968, %false_2060 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %1969, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_2061 = torch.constant.int 32 | |
%int100_2062 = torch.constant.int 100 | |
%1970 = torch.prim.ListConstruct %int32_2061, %int100_2062, %1914 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1971 = torch.aten.view %1969, %1970 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %1971, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%1972 = torch.aten.bmm %1967, %1971 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1972, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_2063 = torch.constant.int 1 | |
%int32_2064 = torch.constant.int 32 | |
%1973 = torch.prim.ListConstruct %int1_2063, %int32_2064, %1885, %1914 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1974 = torch.aten.view %1972, %1973 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1974, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
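// Scale, mask and normalize: scores are divided by 10.0 (sqrt(head_dim) with
// head_dim = 100), the mask %266 is added, and softmax runs in f32 (dtype code 6)
// before casting back to f16 (dtype code 5), presumably for numerical stability.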
%float1.000000e01_2065 = torch.constant.float 1.000000e+01 | |
%1975 = torch.aten.div.Scalar %1974, %float1.000000e01_2065 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1975, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_2066 = torch.constant.int 1 | |
%1976 = torch.aten.add.Tensor %1975, %266, %int1_2066 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1976, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_2067 = torch.constant.int 6 | |
%1977 = torch.prims.convert_element_type %1976, %int6_2067 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1977, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_2068 = torch.constant.int -1 | |
%false_2069 = torch.constant.bool false | |
%1978 = torch.aten._softmax %1977, %int-1_2068, %false_2069 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %1978, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_2070 = torch.constant.int 5 | |
%1979 = torch.prims.convert_element_type %1978, %int5_2070 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1979, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
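// Weighted sum over V: the f16 probabilities are reshaped to [32, seq, seq] and
// multiplied against V ([32, seq, 100]) with a second batched matmul.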
%int1_2071 = torch.constant.int 1 | |
%int32_2072 = torch.constant.int 32 | |
%1980 = torch.prim.ListConstruct %int1_2071, %int32_2072, %1885, %1914 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2073 = torch.constant.bool false | |
%1981 = torch.aten.expand %1979, %1980, %false_2073 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %1981, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_2074 = torch.constant.int 32 | |
%1982 = torch.prim.ListConstruct %int32_2074, %1885, %1914 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1983 = torch.aten.view %1981, %1982 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %1983, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_2075 = torch.constant.int 1 | |
%1984 = torch.aten.size.int %1864, %int1_2075 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_2076 = torch.constant.int 1 | |
%int32_2077 = torch.constant.int 32 | |
%int100_2078 = torch.constant.int 100 | |
%1985 = torch.prim.ListConstruct %int1_2076, %int32_2077, %1984, %int100_2078 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2079 = torch.constant.bool false | |
%1986 = torch.aten.expand %1962, %1985, %false_2079 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1986, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_2080 = torch.constant.int 32 | |
%int100_2081 = torch.constant.int 100 | |
%1987 = torch.prim.ListConstruct %int32_2080, %1984, %int100_2081 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1988 = torch.aten.view %1986, %1987 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1988, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%1989 = torch.aten.bmm %1983, %1988 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %1989, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_2082 = torch.constant.int 1 | |
%int32_2083 = torch.constant.int 32 | |
%int100_2084 = torch.constant.int 100 | |
%1990 = torch.prim.ListConstruct %int1_2082, %int32_2083, %1885, %int100_2084 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1991 = torch.aten.view %1989, %1990 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %1991, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2085 = torch.constant.int 1 | |
%int2_2086 = torch.constant.int 2 | |
%1992 = torch.aten.transpose.int %1991, %int1_2085, %int2_2086 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1992, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_2087 = torch.constant.int 0 | |
%1993 = torch.aten.clone %1992, %int0_2087 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %1993, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_2088 = torch.constant.int 1 | |
%int3200_2089 = torch.constant.int 3200 | |
%1994 = torch.prim.ListConstruct %int1_2088, %1885, %int3200_2089 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1995 = torch.aten._unsafe_view %1993, %1994 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %1995, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
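// Attention output projection and residual: the [1, seq, 3200] context is
// multiplied by the transposed output weight %77 and added to %1837, which
// looks like the residual stream carried in from the block input.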
%int-2_2090 = torch.constant.int -2 | |
%int-1_2091 = torch.constant.int -1 | |
%1996 = torch.aten.transpose.int %77, %int-2_2090, %int-1_2091 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2092 = torch.constant.int 3200 | |
%1997 = torch.prim.ListConstruct %1885, %int3200_2092 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1998 = torch.aten.view %1995, %1997 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1998, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%1999 = torch.aten.mm %1998, %1996 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %1999, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2093 = torch.constant.int 1 | |
%int3200_2094 = torch.constant.int 3200 | |
%2000 = torch.prim.ListConstruct %int1_2093, %1885, %int3200_2094 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2001 = torch.aten.view %1999, %2000 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2001, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_2095 = torch.constant.int 1 | |
%2002 = torch.aten.add.Tensor %1837, %2001, %int1_2095 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2002, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
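// RMSNorm in f32: x * rsqrt(mean(x^2, dim = -1) + eps), scaled by weight %78,
// with eps = 9.9999999747524271e-7, i.e. 1e-6 rounded to f32.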
%int6_2096 = torch.constant.int 6 | |
%2003 = torch.prims.convert_element_type %2002, %int6_2096 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2003, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_2097 = torch.constant.int 2 | |
%2004 = torch.aten.pow.Tensor_Scalar %2003, %int2_2097 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2004, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_2098 = torch.constant.int -1 | |
%2005 = torch.prim.ListConstruct %int-1_2098 : (!torch.int) -> !torch.list<int> | |
%true_2099 = torch.constant.bool true | |
%none_2100 = torch.constant.none | |
%2006 = torch.aten.mean.dim %2004, %2005, %true_2099, %none_2100 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2006, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_2101 = torch.constant.float 9.9999999747524271E-7 | |
%int1_2102 = torch.constant.int 1 | |
%2007 = torch.aten.add.Scalar %2006, %float9.999990e-07_2101, %int1_2102 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2007, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2008 = torch.aten.rsqrt %2007 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2008, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2009 = torch.aten.mul.Tensor %2003, %2008 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2009, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%2010 = torch.aten.mul.Tensor %78, %2009 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2010, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_2103 = torch.constant.int 5 | |
%2011 = torch.prims.convert_element_type %2010, %int5_2103 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2011, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
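// SwiGLU feed-forward: silu(x @ W_gate^T) * (x @ W_up^T), using the
// [8640, 3200] gate/up weights %79 and %80, all in f16.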
%int-2_2104 = torch.constant.int -2 | |
%int-1_2105 = torch.constant.int -1 | |
%2012 = torch.aten.transpose.int %79, %int-2_2104, %int-1_2105 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_2106 = torch.constant.int 3200 | |
%2013 = torch.prim.ListConstruct %240, %int3200_2106 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2014 = torch.aten.view %2011, %2013 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2014, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2015 = torch.aten.mm %2014, %2012 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2015, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_2107 = torch.constant.int 1 | |
%int8640_2108 = torch.constant.int 8640 | |
%2016 = torch.prim.ListConstruct %int1_2107, %240, %int8640_2108 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2017 = torch.aten.view %2015, %2016 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2017, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%2018 = torch.aten.silu %2017 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2018, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_2109 = torch.constant.int -2 | |
%int-1_2110 = torch.constant.int -1 | |
%2019 = torch.aten.transpose.int %80, %int-2_2109, %int-1_2110 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_2111 = torch.constant.int 3200 | |
%2020 = torch.prim.ListConstruct %240, %int3200_2111 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2021 = torch.aten.view %2011, %2020 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2021, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2022 = torch.aten.mm %2021, %2019 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2022, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_2112 = torch.constant.int 1 | |
%int8640_2113 = torch.constant.int 8640 | |
%2023 = torch.prim.ListConstruct %int1_2112, %240, %int8640_2113 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2024 = torch.aten.view %2022, %2023 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2024, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%2025 = torch.aten.mul.Tensor %2018, %2024 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2025, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
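// FFN down-projection (%81, [3200, 8640]) back to the 3200-wide model
// dimension, followed by the second residual add of this block.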
%int-2_2114 = torch.constant.int -2 | |
%int-1_2115 = torch.constant.int -1 | |
%2026 = torch.aten.transpose.int %81, %int-2_2114, %int-1_2115 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_2116 = torch.constant.int 1 | |
%2027 = torch.aten.size.int %2017, %int1_2116 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_2117 = torch.constant.int 8640 | |
%2028 = torch.prim.ListConstruct %2027, %int8640_2117 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2029 = torch.aten.view %2025, %2028 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2029, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%2030 = torch.aten.mm %2029, %2026 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2030, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2118 = torch.constant.int 1 | |
%int3200_2119 = torch.constant.int 3200 | |
%2031 = torch.prim.ListConstruct %int1_2118, %2027, %int3200_2119 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2032 = torch.aten.view %2030, %2031 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2032, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_2120 = torch.constant.int 1 | |
%2033 = torch.aten.add.Tensor %2002, %2032, %int1_2120 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2033, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
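// The next decoder layer appears to start here: another f32 RMSNorm (weight %82)
// followed by Q/K/V projections against the 3200x3200 weights %83, %84 and %85,
// each result reshaped to [1, seq, 32, 100].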
%int6_2121 = torch.constant.int 6 | |
%2034 = torch.prims.convert_element_type %2033, %int6_2121 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2034, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_2122 = torch.constant.int 2 | |
%2035 = torch.aten.pow.Tensor_Scalar %2034, %int2_2122 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2035, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_2123 = torch.constant.int -1 | |
%2036 = torch.prim.ListConstruct %int-1_2123 : (!torch.int) -> !torch.list<int> | |
%true_2124 = torch.constant.bool true | |
%none_2125 = torch.constant.none | |
%2037 = torch.aten.mean.dim %2035, %2036, %true_2124, %none_2125 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2037, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_2126 = torch.constant.float 9.9999999747524271E-7 | |
%int1_2127 = torch.constant.int 1 | |
%2038 = torch.aten.add.Scalar %2037, %float9.999990e-07_2126, %int1_2127 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2038, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2039 = torch.aten.rsqrt %2038 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2039, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2040 = torch.aten.mul.Tensor %2034, %2039 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2040, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%2041 = torch.aten.mul.Tensor %82, %2040 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2041, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_2128 = torch.constant.int 5 | |
%2042 = torch.prims.convert_element_type %2041, %int5_2128 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2042, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_2129 = torch.constant.int -2 | |
%int-1_2130 = torch.constant.int -1 | |
%2043 = torch.aten.transpose.int %83, %int-2_2129, %int-1_2130 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2131 = torch.constant.int 3200 | |
%2044 = torch.prim.ListConstruct %240, %int3200_2131 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2045 = torch.aten.view %2042, %2044 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2045, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2046 = torch.aten.mm %2045, %2043 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2046, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2132 = torch.constant.int 1 | |
%int3200_2133 = torch.constant.int 3200 | |
%2047 = torch.prim.ListConstruct %int1_2132, %240, %int3200_2133 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2048 = torch.aten.view %2046, %2047 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2048, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_2134 = torch.constant.int -2 | |
%int-1_2135 = torch.constant.int -1 | |
%2049 = torch.aten.transpose.int %84, %int-2_2134, %int-1_2135 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2136 = torch.constant.int 3200 | |
%2050 = torch.prim.ListConstruct %240, %int3200_2136 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2051 = torch.aten.view %2042, %2050 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2051, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2052 = torch.aten.mm %2051, %2049 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2052, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2137 = torch.constant.int 1 | |
%int3200_2138 = torch.constant.int 3200 | |
%2053 = torch.prim.ListConstruct %int1_2137, %240, %int3200_2138 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2054 = torch.aten.view %2052, %2053 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2054, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_2139 = torch.constant.int -2 | |
%int-1_2140 = torch.constant.int -1 | |
%2055 = torch.aten.transpose.int %85, %int-2_2139, %int-1_2140 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2141 = torch.constant.int 3200 | |
%2056 = torch.prim.ListConstruct %240, %int3200_2141 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2057 = torch.aten.view %2042, %2056 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2057, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2058 = torch.aten.mm %2057, %2055 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2058, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2142 = torch.constant.int 1 | |
%int3200_2143 = torch.constant.int 3200 | |
%2059 = torch.prim.ListConstruct %int1_2142, %240, %int3200_2143 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2060 = torch.aten.view %2058, %2059 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2060, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_2144 = torch.constant.int 1 | |
%int32_2145 = torch.constant.int 32 | |
%int100_2146 = torch.constant.int 100 | |
%2061 = torch.prim.ListConstruct %int1_2144, %240, %int32_2145, %int100_2146 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2062 = torch.aten.view %2048, %2061 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2062, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_2147 = torch.constant.int 1 | |
%int32_2148 = torch.constant.int 32 | |
%int100_2149 = torch.constant.int 100 | |
%2063 = torch.prim.ListConstruct %int1_2147, %240, %int32_2148, %int100_2149 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2064 = torch.aten.view %2054, %2063 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2064, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_2150 = torch.constant.int 1 | |
%int32_2151 = torch.constant.int 32 | |
%int100_2152 = torch.constant.int 100 | |
%2065 = torch.prim.ListConstruct %int1_2150, %240, %int32_2151, %int100_2152 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2066 = torch.aten.view %2060, %2065 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2066, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
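// Rotary position embedding for Q, spelled out inline: inv_freq =
// 1 / 10000^(2i/100) for i in [0, 50), an outer product with positions
// arange(2048) gives a [2048, 50] angle table, cos/sin are packed into
// complex<f32>, the first seq rows are selected, Q is bitcast from
// [1, seq, 32, 100] f16 to [1, seq, 32, 50] complex<f16>, rotated by a
// complex multiply, and bitcast back.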
%int2048_2153 = torch.constant.int 2048 | |
%none_2154 = torch.constant.none | |
%none_2155 = torch.constant.none | |
%cpu_2156 = torch.constant.device "cpu" | |
%false_2157 = torch.constant.bool false | |
%2067 = torch.aten.arange %int2048_2153, %none_2154, %none_2155, %cpu_2156, %false_2157 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_2158 = torch.constant.int 0 | |
%int100_2159 = torch.constant.int 100 | |
%int2_2160 = torch.constant.int 2 | |
%none_2161 = torch.constant.none | |
%none_2162 = torch.constant.none | |
%cpu_2163 = torch.constant.device "cpu" | |
%false_2164 = torch.constant.bool false | |
%2068 = torch.aten.arange.start_step %int0_2158, %int100_2159, %int2_2160, %none_2161, %none_2162, %cpu_2163, %false_2164 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_2165 = torch.constant.int 0 | |
%int0_2166 = torch.constant.int 0 | |
%int50_2167 = torch.constant.int 50 | |
%int1_2168 = torch.constant.int 1 | |
%2069 = torch.aten.slice.Tensor %2068, %int0_2165, %int0_2166, %int50_2167, %int1_2168 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_2169 = torch.constant.int 6 | |
%2070 = torch.prims.convert_element_type %2069, %int6_2169 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_2170 = torch.constant.int 100 | |
%2071 = torch.aten.div.Scalar %2070, %int100_2170 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_2171 = torch.constant.float 1.000000e+04 | |
%2072 = torch.aten.pow.Scalar %float1.000000e04_2171, %2071 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%2073 = torch.aten.reciprocal %2072 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_2172 = torch.constant.float 1.000000e+00 | |
%2074 = torch.aten.mul.Scalar %2073, %float1.000000e00_2172 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_2173 = torch.constant.int 2048 | |
%int1_2174 = torch.constant.int 1 | |
%2075 = torch.prim.ListConstruct %int2048_2173, %int1_2174 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2076 = torch.aten.view %2067, %2075 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%2077 = torch.aten.mul.Tensor %2076, %2074 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2078 = torch.aten.cos %2077 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2079 = torch.aten.sin %2077 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2080 = torch.aten.complex %2078, %2079 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_2175 = torch.constant.int 1 | |
%2081 = torch.aten.size.int %2048, %int1_2175 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_2176 = torch.constant.int 0 | |
%2082 = torch.aten.add.int %int0_2176, %2081 : !torch.int, !torch.int -> !torch.int | |
%int0_2177 = torch.constant.int 0 | |
%int0_2178 = torch.constant.int 0 | |
%int1_2179 = torch.constant.int 1 | |
%2083 = torch.aten.slice.Tensor %2080, %int0_2177, %int0_2178, %2082, %int1_2179 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2083, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_2180 = torch.constant.int 1 | |
%int0_2181 = torch.constant.int 0 | |
%int9223372036854775807_2182 = torch.constant.int 9223372036854775807 | |
%int1_2183 = torch.constant.int 1 | |
%2084 = torch.aten.slice.Tensor %2083, %int1_2180, %int0_2181, %int9223372036854775807_2182, %int1_2183 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2084, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_2184 = torch.constant.int 0 | |
%2085 = torch.aten.unsqueeze %2084, %int0_2184 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %2085, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_2185 = torch.constant.int 2 | |
%2086 = torch.aten.unsqueeze %2085, %int2_2185 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2086, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_2186 = torch.constant.int 3 | |
%int0_2187 = torch.constant.int 0 | |
%int9223372036854775807_2188 = torch.constant.int 9223372036854775807 | |
%int1_2189 = torch.constant.int 1 | |
%2087 = torch.aten.slice.Tensor %2086, %int3_2186, %int0_2187, %int9223372036854775807_2188, %int1_2189 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2087, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%2088 = torch_c.to_builtin_tensor %2062 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_2190 = arith.constant 1 : index | |
%dim_2191 = tensor.dim %2088, %c1_2190 : tensor<1x?x32x100xf16> | |
%2089 = flow.tensor.bitcast %2088 : tensor<1x?x32x100xf16>{%dim_2191} -> tensor<1x?x32x50xcomplex<f16>>{%dim_2191} | |
%2090 = torch_c.from_builtin_tensor %2089 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %2090, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%2091 = torch.aten.mul.Tensor %2090, %2087 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %2091, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%2092 = torch_c.to_builtin_tensor %2091 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_2192 = arith.constant 1 : index | |
%dim_2193 = tensor.dim %2092, %c1_2192 : tensor<1x?x32x50xcomplex<f32>> | |
%2093 = flow.tensor.bitcast %2092 : tensor<1x?x32x50xcomplex<f32>>{%dim_2193} -> tensor<1x?x32x100xf32>{%dim_2193} | |
%2094 = torch_c.from_builtin_tensor %2093 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %2094, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_2194 = torch.constant.int 5 | |
%2095 = torch.prims.convert_element_type %2094, %int5_2194 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2095, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
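// The same rotary-embedding computation is repeated verbatim for K (%2064);
// V (%2066) is left unrotated.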
%int2048_2195 = torch.constant.int 2048 | |
%none_2196 = torch.constant.none | |
%none_2197 = torch.constant.none | |
%cpu_2198 = torch.constant.device "cpu" | |
%false_2199 = torch.constant.bool false | |
%2096 = torch.aten.arange %int2048_2195, %none_2196, %none_2197, %cpu_2198, %false_2199 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_2200 = torch.constant.int 0 | |
%int100_2201 = torch.constant.int 100 | |
%int2_2202 = torch.constant.int 2 | |
%none_2203 = torch.constant.none | |
%none_2204 = torch.constant.none | |
%cpu_2205 = torch.constant.device "cpu" | |
%false_2206 = torch.constant.bool false | |
%2097 = torch.aten.arange.start_step %int0_2200, %int100_2201, %int2_2202, %none_2203, %none_2204, %cpu_2205, %false_2206 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_2207 = torch.constant.int 0 | |
%int0_2208 = torch.constant.int 0 | |
%int50_2209 = torch.constant.int 50 | |
%int1_2210 = torch.constant.int 1 | |
%2098 = torch.aten.slice.Tensor %2097, %int0_2207, %int0_2208, %int50_2209, %int1_2210 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_2211 = torch.constant.int 6 | |
%2099 = torch.prims.convert_element_type %2098, %int6_2211 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_2212 = torch.constant.int 100 | |
%2100 = torch.aten.div.Scalar %2099, %int100_2212 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_2213 = torch.constant.float 1.000000e+04 | |
%2101 = torch.aten.pow.Scalar %float1.000000e04_2213, %2100 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%2102 = torch.aten.reciprocal %2101 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_2214 = torch.constant.float 1.000000e+00 | |
%2103 = torch.aten.mul.Scalar %2102, %float1.000000e00_2214 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_2215 = torch.constant.int 2048 | |
%int1_2216 = torch.constant.int 1 | |
%2104 = torch.prim.ListConstruct %int2048_2215, %int1_2216 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2105 = torch.aten.view %2096, %2104 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%2106 = torch.aten.mul.Tensor %2105, %2103 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2107 = torch.aten.cos %2106 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2108 = torch.aten.sin %2106 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2109 = torch.aten.complex %2107, %2108 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_2217 = torch.constant.int 1 | |
%2110 = torch.aten.size.int %2054, %int1_2217 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_2218 = torch.constant.int 0 | |
%2111 = torch.aten.add.int %int0_2218, %2110 : !torch.int, !torch.int -> !torch.int | |
%int0_2219 = torch.constant.int 0 | |
%int0_2220 = torch.constant.int 0 | |
%int1_2221 = torch.constant.int 1 | |
%2112 = torch.aten.slice.Tensor %2109, %int0_2219, %int0_2220, %2111, %int1_2221 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2112, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_2222 = torch.constant.int 1 | |
%int0_2223 = torch.constant.int 0 | |
%int9223372036854775807_2224 = torch.constant.int 9223372036854775807 | |
%int1_2225 = torch.constant.int 1 | |
%2113 = torch.aten.slice.Tensor %2112, %int1_2222, %int0_2223, %int9223372036854775807_2224, %int1_2225 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2113, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_2226 = torch.constant.int 0 | |
%2114 = torch.aten.unsqueeze %2113, %int0_2226 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %2114, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_2227 = torch.constant.int 2 | |
%2115 = torch.aten.unsqueeze %2114, %int2_2227 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2115, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_2228 = torch.constant.int 3 | |
%int0_2229 = torch.constant.int 0 | |
%int9223372036854775807_2230 = torch.constant.int 9223372036854775807 | |
%int1_2231 = torch.constant.int 1 | |
%2116 = torch.aten.slice.Tensor %2115, %int3_2228, %int0_2229, %int9223372036854775807_2230, %int1_2231 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2116, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%2117 = torch_c.to_builtin_tensor %2064 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_2232 = arith.constant 1 : index | |
%dim_2233 = tensor.dim %2117, %c1_2232 : tensor<1x?x32x100xf16> | |
%2118 = flow.tensor.bitcast %2117 : tensor<1x?x32x100xf16>{%dim_2233} -> tensor<1x?x32x50xcomplex<f16>>{%dim_2233} | |
%2119 = torch_c.from_builtin_tensor %2118 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %2119, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%2120 = torch.aten.mul.Tensor %2119, %2116 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %2120, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%2121 = torch_c.to_builtin_tensor %2120 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_2234 = arith.constant 1 : index | |
%dim_2235 = tensor.dim %2121, %c1_2234 : tensor<1x?x32x50xcomplex<f32>> | |
%2122 = flow.tensor.bitcast %2121 : tensor<1x?x32x50xcomplex<f32>>{%dim_2235} -> tensor<1x?x32x100xf32>{%dim_2235} | |
%2123 = torch_c.from_builtin_tensor %2122 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %2123, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_2236 = torch.constant.int 5 | |
%2124 = torch.prims.convert_element_type %2123, %int5_2236 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2124, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
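// Cache slot arithmetic: row = page_id * 52 + 18, where 52 = 26 layers x 2
// tensors per layer; offset 18 would be the K slot of layer 9, with the +1
// below selecting its V slot. The same paging, indexing and index_put pattern
// as above then writes this layer's K/V into the cache.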
%int52_2237 = torch.constant.int 52 | |
%2125 = torch.aten.mul.Scalar %arg2, %int52_2237 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %2125, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int18 = torch.constant.int 18 | |
%int1_2238 = torch.constant.int 1 | |
%2126 = torch.aten.add.Scalar %2125, %int18, %int1_2238 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %2126, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_2239 = torch.constant.int 1 | |
%int16_2240 = torch.constant.int 16 | |
%int32_2241 = torch.constant.int 32 | |
%int100_2242 = torch.constant.int 100 | |
%2127 = torch.prim.ListConstruct %int1_2239, %368, %int16_2240, %int32_2241, %int100_2242 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2128 = torch.aten.view %2124, %2127 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %2128, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_2243 = torch.constant.int 16 | |
%int32_2244 = torch.constant.int 32 | |
%int100_2245 = torch.constant.int 100 | |
%2129 = torch.prim.ListConstruct %368, %int16_2243, %int32_2244, %int100_2245 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2130 = torch.aten.view %2128, %2129 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2130, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%2131 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%2132 = torch.aten.view %2126, %2131 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %2132, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_2246 = torch.constant.int 1 | |
%int16_2247 = torch.constant.int 16 | |
%int32_2248 = torch.constant.int 32 | |
%int100_2249 = torch.constant.int 100 | |
%2133 = torch.prim.ListConstruct %int1_2246, %368, %int16_2247, %int32_2248, %int100_2249 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2134 = torch.aten.view %2066, %2133 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %2134, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_2250 = torch.constant.int 16 | |
%int32_2251 = torch.constant.int 32 | |
%int100_2252 = torch.constant.int 100 | |
%2135 = torch.prim.ListConstruct %368, %int16_2250, %int32_2251, %int100_2252 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2136 = torch.aten.view %2134, %2135 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2136, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_2253 = torch.constant.int 1 | |
%int1_2254 = torch.constant.int 1 | |
%2137 = torch.aten.add.Scalar %2126, %int1_2253, %int1_2254 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %2137, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%2138 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%2139 = torch.aten.view %2137, %2138 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %2139, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%2140 = torch.prim.ListConstruct %2132, %2139 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_2255 = torch.constant.int 0 | |
%2141 = torch.aten.cat %2140, %int0_2255 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %2141, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%2142 = torch.prim.ListConstruct %2130, %2136 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_2256 = torch.constant.int 0 | |
%2143 = torch.aten.cat %2142, %int0_2256 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2143, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_2257 = torch.constant.int 26 | |
%int2_2258 = torch.constant.int 2 | |
%int16_2259 = torch.constant.int 16 | |
%int32_2260 = torch.constant.int 32 | |
%int100_2261 = torch.constant.int 100 | |
%2144 = torch.prim.ListConstruct %359, %int26_2257, %int2_2258, %int16_2259, %int32_2260, %int100_2261 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2145 = torch.aten.view %1959, %2144 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %2145, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_2262 = torch.constant.int 26 | |
%2146 = torch.aten.mul.int %359, %int26_2262 : !torch.int, !torch.int -> !torch.int | |
%int2_2263 = torch.constant.int 2 | |
%2147 = torch.aten.mul.int %2146, %int2_2263 : !torch.int, !torch.int -> !torch.int | |
%int16_2264 = torch.constant.int 16 | |
%int32_2265 = torch.constant.int 32 | |
%int100_2266 = torch.constant.int 100 | |
%2148 = torch.prim.ListConstruct %2147, %int16_2264, %int32_2265, %int100_2266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2149 = torch.aten.view %2145, %2148 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2149, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%2150 = torch.prim.ListConstruct %2141 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_2267 = torch.constant.bool false | |
%2151 = torch.aten.index_put %2149, %2150, %2143, %false_2267 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2151, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_2268 = torch.constant.int 26 | |
%int2_2269 = torch.constant.int 2 | |
%int16_2270 = torch.constant.int 16 | |
%int32_2271 = torch.constant.int 32 | |
%int100_2272 = torch.constant.int 100 | |
%2152 = torch.prim.ListConstruct %359, %int26_2268, %int2_2269, %int16_2270, %int32_2271, %int100_2272 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2153 = torch.aten.view %2151, %2152 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %2153, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_2273 = torch.constant.int 2662400 | |
%2154 = torch.prim.ListConstruct %359, %int2662400_2273 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2155 = torch.aten.view %2153, %2154 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %2155, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
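// Attention for this layer, mirroring the earlier block: transpose rotated Q/K
// and unrotated V, Q @ K^T via batched matmul, scale by 1/10, add mask %266,
// f32 softmax, multiply by V, and transpose back to [1, seq, 32, 100].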
%int1_2274 = torch.constant.int 1 | |
%int2_2275 = torch.constant.int 2 | |
%2156 = torch.aten.transpose.int %2095, %int1_2274, %int2_2275 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2156, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2276 = torch.constant.int 1 | |
%int2_2277 = torch.constant.int 2 | |
%2157 = torch.aten.transpose.int %2124, %int1_2276, %int2_2277 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2157, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2278 = torch.constant.int 1 | |
%int2_2279 = torch.constant.int 2 | |
%2158 = torch.aten.transpose.int %2066, %int1_2278, %int2_2279 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2158, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_2280 = torch.constant.int 2 | |
%int3_2281 = torch.constant.int 3 | |
%2159 = torch.aten.transpose.int %2157, %int2_2280, %int3_2281 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %2159, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_2282 = torch.constant.int 1 | |
%int32_2283 = torch.constant.int 32 | |
%int100_2284 = torch.constant.int 100 | |
%2160 = torch.prim.ListConstruct %int1_2282, %int32_2283, %2081, %int100_2284 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2285 = torch.constant.bool false | |
%2161 = torch.aten.expand %2156, %2160, %false_2285 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2161, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_2286 = torch.constant.int 32 | |
%int100_2287 = torch.constant.int 100 | |
%2162 = torch.prim.ListConstruct %int32_2286, %2081, %int100_2287 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2163 = torch.aten.view %2161, %2162 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %2163, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_2288 = torch.constant.int 1 | |
%int32_2289 = torch.constant.int 32 | |
%int100_2290 = torch.constant.int 100 | |
%2164 = torch.prim.ListConstruct %int1_2288, %int32_2289, %int100_2290, %2110 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2291 = torch.constant.bool false | |
%2165 = torch.aten.expand %2159, %2164, %false_2291 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %2165, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_2292 = torch.constant.int 32 | |
%int100_2293 = torch.constant.int 100 | |
%2166 = torch.prim.ListConstruct %int32_2292, %int100_2293, %2110 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2167 = torch.aten.view %2165, %2166 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %2167, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%2168 = torch.aten.bmm %2163, %2167 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %2168, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_2294 = torch.constant.int 1 | |
%int32_2295 = torch.constant.int 32 | |
%2169 = torch.prim.ListConstruct %int1_2294, %int32_2295, %2081, %2110 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2170 = torch.aten.view %2168, %2169 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2170, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%float1.000000e01_2296 = torch.constant.float 1.000000e+01
%2171 = torch.aten.div.Scalar %2170, %float1.000000e01_2296 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2171, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int1_2297 = torch.constant.int 1
%2172 = torch.aten.add.Tensor %2171, %266, %int1_2297 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2172, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int6_2298 = torch.constant.int 6
%2173 = torch.prims.convert_element_type %2172, %int6_2298 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %2173, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int-1_2299 = torch.constant.int -1
%false_2300 = torch.constant.bool false
%2174 = torch.aten._softmax %2173, %int-1_2299, %false_2300 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %2174, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int5_2301 = torch.constant.int 5
%2175 = torch.prims.convert_element_type %2174, %int5_2301 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2175, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
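// Apply the attention probabilities to V: expand to [1,32,seq,seq], flatten to a batched
// [32,seq,seq] x [32,seq,100] bmm, restore the [1,32,seq,100] layout, then transpose and merge
// the heads back into a [1,seq,3200] activation.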
%int1_2302 = torch.constant.int 1
%int32_2303 = torch.constant.int 32
%2176 = torch.prim.ListConstruct %int1_2302, %int32_2303, %2081, %2110 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_2304 = torch.constant.bool false
%2177 = torch.aten.expand %2175, %2176, %false_2304 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2177, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int32_2305 = torch.constant.int 32
%2178 = torch.prim.ListConstruct %int32_2305, %2081, %2110 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2179 = torch.aten.view %2177, %2178 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16>
torch.bind_symbolic_shape %2179, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16>
%int1_2306 = torch.constant.int 1
%2180 = torch.aten.size.int %2060, %int1_2306 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int1_2307 = torch.constant.int 1
%int32_2308 = torch.constant.int 32
%int100_2309 = torch.constant.int 100
%2181 = torch.prim.ListConstruct %int1_2307, %int32_2308, %2180, %int100_2309 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_2310 = torch.constant.bool false
%2182 = torch.aten.expand %2158, %2181, %false_2310 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2182, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int32_2311 = torch.constant.int 32
%int100_2312 = torch.constant.int 100
%2183 = torch.prim.ListConstruct %int32_2311, %2180, %int100_2312 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2184 = torch.aten.view %2182, %2183 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %2184, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%2185 = torch.aten.bmm %2179, %2184 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %2185, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%int1_2313 = torch.constant.int 1
%int32_2314 = torch.constant.int 32
%int100_2315 = torch.constant.int 100
%2186 = torch.prim.ListConstruct %int1_2313, %int32_2314, %2081, %int100_2315 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2187 = torch.aten.view %2185, %2186 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2187, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_2316 = torch.constant.int 1
%int2_2317 = torch.constant.int 2
%2188 = torch.aten.transpose.int %2187, %int1_2316, %int2_2317 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2188, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int0_2318 = torch.constant.int 0
%2189 = torch.aten.clone %2188, %int0_2318 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2189, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_2319 = torch.constant.int 1
%int3200_2320 = torch.constant.int 3200
%2190 = torch.prim.ListConstruct %int1_2319, %2081, %int3200_2320 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2191 = torch.aten._unsafe_view %2189, %2190 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2191, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
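// Attention output projection (x @ %86^T over the flattened [seq,3200] activations, where %86 is
// a [3200,3200] weight, presumably this block's attn_output.weight), followed by the residual add
// back onto the block input (%2033).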
%int-2_2321 = torch.constant.int -2
%int-1_2322 = torch.constant.int -1
%2192 = torch.aten.transpose.int %86, %int-2_2321, %int-1_2322 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_2323 = torch.constant.int 3200
%2193 = torch.prim.ListConstruct %2081, %int3200_2323 : (!torch.int, !torch.int) -> !torch.list<int>
%2194 = torch.aten.view %2191, %2193 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2194, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%2195 = torch.aten.mm %2194, %2192 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2195, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_2324 = torch.constant.int 1
%int3200_2325 = torch.constant.int 3200
%2196 = torch.prim.ListConstruct %int1_2324, %2081, %int3200_2325 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2197 = torch.aten.view %2195, %2196 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2197, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_2326 = torch.constant.int 1
%2198 = torch.aten.add.Tensor %2033, %2197, %int1_2326 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2198, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
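// RMSNorm: mean of x^2 over the hidden dim, add eps ~= 1e-6, rsqrt, rescale, then multiply by the
// [3200] norm weight %87 (presumably this block's ffn_norm.weight); computed in f32 and cast back
// to f16.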
%int6_2327 = torch.constant.int 6
%2199 = torch.prims.convert_element_type %2198, %int6_2327 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2199, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int2_2328 = torch.constant.int 2
%2200 = torch.aten.pow.Tensor_Scalar %2199, %int2_2328 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2200, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int-1_2329 = torch.constant.int -1
%2201 = torch.prim.ListConstruct %int-1_2329 : (!torch.int) -> !torch.list<int>
%true_2330 = torch.constant.bool true
%none_2331 = torch.constant.none
%2202 = torch.aten.mean.dim %2200, %2201, %true_2330, %none_2331 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %2202, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%float9.999990e-07_2332 = torch.constant.float 9.9999999747524271E-7
%int1_2333 = torch.constant.int 1
%2203 = torch.aten.add.Scalar %2202, %float9.999990e-07_2332, %int1_2333 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %2203, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%2204 = torch.aten.rsqrt %2203 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %2204, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%2205 = torch.aten.mul.Tensor %2199, %2204 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2205, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%2206 = torch.aten.mul.Tensor %87, %2205 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2206, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int5_2334 = torch.constant.int 5
%2207 = torch.prims.convert_element_type %2206, %int5_2334 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2207, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
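// SwiGLU feed-forward: silu(x @ %88^T) * (x @ %89^T) with [8640,3200] gate/up weights, then the
// [3200,8640] down projection %90, followed by a residual add.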
%int-2_2335 = torch.constant.int -2
%int-1_2336 = torch.constant.int -1
%2208 = torch.aten.transpose.int %88, %int-2_2335, %int-1_2336 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16>
%int3200_2337 = torch.constant.int 3200
%2209 = torch.prim.ListConstruct %240, %int3200_2337 : (!torch.int, !torch.int) -> !torch.list<int>
%2210 = torch.aten.view %2207, %2209 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2210, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%2211 = torch.aten.mm %2210, %2208 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %2211, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%int1_2338 = torch.constant.int 1
%int8640_2339 = torch.constant.int 8640
%2212 = torch.prim.ListConstruct %int1_2338, %240, %int8640_2339 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2213 = torch.aten.view %2211, %2212 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %2213, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%2214 = torch.aten.silu %2213 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %2214, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%int-2_2340 = torch.constant.int -2
%int-1_2341 = torch.constant.int -1
%2215 = torch.aten.transpose.int %89, %int-2_2340, %int-1_2341 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16>
%int3200_2342 = torch.constant.int 3200
%2216 = torch.prim.ListConstruct %240, %int3200_2342 : (!torch.int, !torch.int) -> !torch.list<int>
%2217 = torch.aten.view %2207, %2216 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2217, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%2218 = torch.aten.mm %2217, %2215 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %2218, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%int1_2343 = torch.constant.int 1
%int8640_2344 = torch.constant.int 8640
%2219 = torch.prim.ListConstruct %int1_2343, %240, %int8640_2344 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2220 = torch.aten.view %2218, %2219 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %2220, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%2221 = torch.aten.mul.Tensor %2214, %2220 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %2221, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%int-2_2345 = torch.constant.int -2
%int-1_2346 = torch.constant.int -1
%2222 = torch.aten.transpose.int %90, %int-2_2345, %int-1_2346 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16>
%int1_2347 = torch.constant.int 1
%2223 = torch.aten.size.int %2213, %int1_2347 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int
%int8640_2348 = torch.constant.int 8640
%2224 = torch.prim.ListConstruct %2223, %int8640_2348 : (!torch.int, !torch.int) -> !torch.list<int>
%2225 = torch.aten.view %2221, %2224 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %2225, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%2226 = torch.aten.mm %2225, %2222 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2226, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_2349 = torch.constant.int 1
%int3200_2350 = torch.constant.int 3200
%2227 = torch.prim.ListConstruct %int1_2349, %2223, %int3200_2350 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2228 = torch.aten.view %2226, %2227 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2228, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_2351 = torch.constant.int 1
%2229 = torch.aten.add.Tensor %2198, %2228, %int1_2351 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2229, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
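// Next decoder block: RMSNorm of the residual stream with weight %91 (presumably the next
// block's attn_norm.weight), same eps ~= 1e-6 recipe as above.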
%int6_2352 = torch.constant.int 6
%2230 = torch.prims.convert_element_type %2229, %int6_2352 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2230, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int2_2353 = torch.constant.int 2
%2231 = torch.aten.pow.Tensor_Scalar %2230, %int2_2353 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2231, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int-1_2354 = torch.constant.int -1
%2232 = torch.prim.ListConstruct %int-1_2354 : (!torch.int) -> !torch.list<int>
%true_2355 = torch.constant.bool true
%none_2356 = torch.constant.none
%2233 = torch.aten.mean.dim %2231, %2232, %true_2355, %none_2356 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %2233, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%float9.999990e-07_2357 = torch.constant.float 9.9999999747524271E-7
%int1_2358 = torch.constant.int 1
%2234 = torch.aten.add.Scalar %2233, %float9.999990e-07_2357, %int1_2358 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %2234, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%2235 = torch.aten.rsqrt %2234 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %2235, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%2236 = torch.aten.mul.Tensor %2230, %2235 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2236, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%2237 = torch.aten.mul.Tensor %91, %2236 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2237, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int5_2359 = torch.constant.int 5
%2238 = torch.prims.convert_element_type %2237, %int5_2359 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2238, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
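// Q, K and V projections (%92, %93, %94, each [3200,3200]) of the normalized activations, each
// reshaped to [1, seq, 32 heads, 100 head_dim].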
%int-2_2360 = torch.constant.int -2
%int-1_2361 = torch.constant.int -1
%2239 = torch.aten.transpose.int %92, %int-2_2360, %int-1_2361 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_2362 = torch.constant.int 3200
%2240 = torch.prim.ListConstruct %240, %int3200_2362 : (!torch.int, !torch.int) -> !torch.list<int>
%2241 = torch.aten.view %2238, %2240 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2241, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%2242 = torch.aten.mm %2241, %2239 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2242, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_2363 = torch.constant.int 1
%int3200_2364 = torch.constant.int 3200
%2243 = torch.prim.ListConstruct %int1_2363, %240, %int3200_2364 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2244 = torch.aten.view %2242, %2243 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2244, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_2365 = torch.constant.int -2
%int-1_2366 = torch.constant.int -1
%2245 = torch.aten.transpose.int %93, %int-2_2365, %int-1_2366 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_2367 = torch.constant.int 3200
%2246 = torch.prim.ListConstruct %240, %int3200_2367 : (!torch.int, !torch.int) -> !torch.list<int>
%2247 = torch.aten.view %2238, %2246 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2247, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%2248 = torch.aten.mm %2247, %2245 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2248, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_2368 = torch.constant.int 1
%int3200_2369 = torch.constant.int 3200
%2249 = torch.prim.ListConstruct %int1_2368, %240, %int3200_2369 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2250 = torch.aten.view %2248, %2249 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2250, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_2370 = torch.constant.int -2
%int-1_2371 = torch.constant.int -1
%2251 = torch.aten.transpose.int %94, %int-2_2370, %int-1_2371 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_2372 = torch.constant.int 3200
%2252 = torch.prim.ListConstruct %240, %int3200_2372 : (!torch.int, !torch.int) -> !torch.list<int>
%2253 = torch.aten.view %2238, %2252 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2253, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%2254 = torch.aten.mm %2253, %2251 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2254, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_2373 = torch.constant.int 1
%int3200_2374 = torch.constant.int 3200
%2255 = torch.prim.ListConstruct %int1_2373, %240, %int3200_2374 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2256 = torch.aten.view %2254, %2255 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2256, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_2375 = torch.constant.int 1
%int32_2376 = torch.constant.int 32
%int100_2377 = torch.constant.int 100
%2257 = torch.prim.ListConstruct %int1_2375, %240, %int32_2376, %int100_2377 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2258 = torch.aten.view %2244, %2257 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2258, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_2378 = torch.constant.int 1
%int32_2379 = torch.constant.int 32
%int100_2380 = torch.constant.int 100
%2259 = torch.prim.ListConstruct %int1_2378, %240, %int32_2379, %int100_2380 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2260 = torch.aten.view %2250, %2259 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2260, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_2381 = torch.constant.int 1
%int32_2382 = torch.constant.int 32
%int100_2383 = torch.constant.int 100
%2261 = torch.prim.ListConstruct %int1_2381, %240, %int32_2382, %int100_2383 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2262 = torch.aten.view %2256, %2261 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2262, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
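// Rotary position embedding (RoPE) for Q: build the complex rotation table exp(i * pos * theta_j)
// with theta_j = 10000^(-2j/100) over 2048 positions, slice it to the current sequence length,
// bitcast each head's 100 f16 values to 50 complex<f16> pairs, rotate by complex multiply, and
// bitcast back to [1,seq,32,100] f16.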
%int2048_2384 = torch.constant.int 2048
%none_2385 = torch.constant.none
%none_2386 = torch.constant.none
%cpu_2387 = torch.constant.device "cpu"
%false_2388 = torch.constant.bool false
%2263 = torch.aten.arange %int2048_2384, %none_2385, %none_2386, %cpu_2387, %false_2388 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64>
%int0_2389 = torch.constant.int 0
%int100_2390 = torch.constant.int 100
%int2_2391 = torch.constant.int 2
%none_2392 = torch.constant.none
%none_2393 = torch.constant.none
%cpu_2394 = torch.constant.device "cpu"
%false_2395 = torch.constant.bool false
%2264 = torch.aten.arange.start_step %int0_2389, %int100_2390, %int2_2391, %none_2392, %none_2393, %cpu_2394, %false_2395 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64>
%int0_2396 = torch.constant.int 0
%int0_2397 = torch.constant.int 0
%int50_2398 = torch.constant.int 50
%int1_2399 = torch.constant.int 1
%2265 = torch.aten.slice.Tensor %2264, %int0_2396, %int0_2397, %int50_2398, %int1_2399 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64>
%int6_2400 = torch.constant.int 6
%2266 = torch.prims.convert_element_type %2265, %int6_2400 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32>
%int100_2401 = torch.constant.int 100
%2267 = torch.aten.div.Scalar %2266, %int100_2401 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32>
%float1.000000e04_2402 = torch.constant.float 1.000000e+04
%2268 = torch.aten.pow.Scalar %float1.000000e04_2402, %2267 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%2269 = torch.aten.reciprocal %2268 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%float1.000000e00_2403 = torch.constant.float 1.000000e+00
%2270 = torch.aten.mul.Scalar %2269, %float1.000000e00_2403 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32>
%int2048_2404 = torch.constant.int 2048
%int1_2405 = torch.constant.int 1
%2271 = torch.prim.ListConstruct %int2048_2404, %int1_2405 : (!torch.int, !torch.int) -> !torch.list<int>
%2272 = torch.aten.view %2263, %2271 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64>
%2273 = torch.aten.mul.Tensor %2272, %2270 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32>
%2274 = torch.aten.cos %2273 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%2275 = torch.aten.sin %2273 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%2276 = torch.aten.complex %2274, %2275 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>>
%int1_2406 = torch.constant.int 1
%2277 = torch.aten.size.int %2244, %int1_2406 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int0_2407 = torch.constant.int 0
%2278 = torch.aten.add.int %int0_2407, %2277 : !torch.int, !torch.int -> !torch.int
%int0_2408 = torch.constant.int 0
%int0_2409 = torch.constant.int 0
%int1_2410 = torch.constant.int 1
%2279 = torch.aten.slice.Tensor %2276, %int0_2408, %int0_2409, %2278, %int1_2410 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %2279, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int1_2411 = torch.constant.int 1
%int0_2412 = torch.constant.int 0
%int9223372036854775807_2413 = torch.constant.int 9223372036854775807
%int1_2414 = torch.constant.int 1
%2280 = torch.aten.slice.Tensor %2279, %int1_2411, %int0_2412, %int9223372036854775807_2413, %int1_2414 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %2280, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int0_2415 = torch.constant.int 0
%2281 = torch.aten.unsqueeze %2280, %int0_2415 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>>
torch.bind_symbolic_shape %2281, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>>
%int2_2416 = torch.constant.int 2
%2282 = torch.aten.unsqueeze %2281, %int2_2416 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %2282, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%int3_2417 = torch.constant.int 3
%int0_2418 = torch.constant.int 0
%int9223372036854775807_2419 = torch.constant.int 9223372036854775807
%int1_2420 = torch.constant.int 1
%2283 = torch.aten.slice.Tensor %2282, %int3_2417, %int0_2418, %int9223372036854775807_2419, %int1_2420 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %2283, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%2284 = torch_c.to_builtin_tensor %2258 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16>
%c1_2421 = arith.constant 1 : index
%dim_2422 = tensor.dim %2284, %c1_2421 : tensor<1x?x32x100xf16>
%2285 = flow.tensor.bitcast %2284 : tensor<1x?x32x100xf16>{%dim_2422} -> tensor<1x?x32x50xcomplex<f16>>{%dim_2422}
%2286 = torch_c.from_builtin_tensor %2285 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>>
torch.bind_symbolic_shape %2286, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>>
%2287 = torch.aten.mul.Tensor %2286, %2283 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>>
torch.bind_symbolic_shape %2287, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>>
%2288 = torch_c.to_builtin_tensor %2287 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>>
%c1_2423 = arith.constant 1 : index
%dim_2424 = tensor.dim %2288, %c1_2423 : tensor<1x?x32x50xcomplex<f32>>
%2289 = flow.tensor.bitcast %2288 : tensor<1x?x32x50xcomplex<f32>>{%dim_2424} -> tensor<1x?x32x100xf32>{%dim_2424}
%2290 = torch_c.from_builtin_tensor %2289 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32>
torch.bind_symbolic_shape %2290, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32>
%int5_2425 = torch.constant.int 5
%2291 = torch.prims.convert_element_type %2290, %int5_2425 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2291, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
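// The same RoPE rotation table is recomputed and applied to K.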
%int2048_2426 = torch.constant.int 2048
%none_2427 = torch.constant.none
%none_2428 = torch.constant.none
%cpu_2429 = torch.constant.device "cpu"
%false_2430 = torch.constant.bool false
%2292 = torch.aten.arange %int2048_2426, %none_2427, %none_2428, %cpu_2429, %false_2430 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64>
%int0_2431 = torch.constant.int 0
%int100_2432 = torch.constant.int 100
%int2_2433 = torch.constant.int 2
%none_2434 = torch.constant.none
%none_2435 = torch.constant.none
%cpu_2436 = torch.constant.device "cpu"
%false_2437 = torch.constant.bool false
%2293 = torch.aten.arange.start_step %int0_2431, %int100_2432, %int2_2433, %none_2434, %none_2435, %cpu_2436, %false_2437 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64>
%int0_2438 = torch.constant.int 0
%int0_2439 = torch.constant.int 0
%int50_2440 = torch.constant.int 50
%int1_2441 = torch.constant.int 1
%2294 = torch.aten.slice.Tensor %2293, %int0_2438, %int0_2439, %int50_2440, %int1_2441 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64>
%int6_2442 = torch.constant.int 6
%2295 = torch.prims.convert_element_type %2294, %int6_2442 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32>
%int100_2443 = torch.constant.int 100
%2296 = torch.aten.div.Scalar %2295, %int100_2443 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32>
%float1.000000e04_2444 = torch.constant.float 1.000000e+04
%2297 = torch.aten.pow.Scalar %float1.000000e04_2444, %2296 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%2298 = torch.aten.reciprocal %2297 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%float1.000000e00_2445 = torch.constant.float 1.000000e+00
%2299 = torch.aten.mul.Scalar %2298, %float1.000000e00_2445 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32>
%int2048_2446 = torch.constant.int 2048
%int1_2447 = torch.constant.int 1
%2300 = torch.prim.ListConstruct %int2048_2446, %int1_2447 : (!torch.int, !torch.int) -> !torch.list<int>
%2301 = torch.aten.view %2292, %2300 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64>
%2302 = torch.aten.mul.Tensor %2301, %2299 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32>
%2303 = torch.aten.cos %2302 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%2304 = torch.aten.sin %2302 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%2305 = torch.aten.complex %2303, %2304 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>>
%int1_2448 = torch.constant.int 1
%2306 = torch.aten.size.int %2250, %int1_2448 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int0_2449 = torch.constant.int 0
%2307 = torch.aten.add.int %int0_2449, %2306 : !torch.int, !torch.int -> !torch.int
%int0_2450 = torch.constant.int 0
%int0_2451 = torch.constant.int 0
%int1_2452 = torch.constant.int 1
%2308 = torch.aten.slice.Tensor %2305, %int0_2450, %int0_2451, %2307, %int1_2452 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %2308, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int1_2453 = torch.constant.int 1
%int0_2454 = torch.constant.int 0
%int9223372036854775807_2455 = torch.constant.int 9223372036854775807
%int1_2456 = torch.constant.int 1
%2309 = torch.aten.slice.Tensor %2308, %int1_2453, %int0_2454, %int9223372036854775807_2455, %int1_2456 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %2309, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int0_2457 = torch.constant.int 0
%2310 = torch.aten.unsqueeze %2309, %int0_2457 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>>
torch.bind_symbolic_shape %2310, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>>
%int2_2458 = torch.constant.int 2
%2311 = torch.aten.unsqueeze %2310, %int2_2458 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %2311, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%int3_2459 = torch.constant.int 3
%int0_2460 = torch.constant.int 0
%int9223372036854775807_2461 = torch.constant.int 9223372036854775807
%int1_2462 = torch.constant.int 1
%2312 = torch.aten.slice.Tensor %2311, %int3_2459, %int0_2460, %int9223372036854775807_2461, %int1_2462 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %2312, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%2313 = torch_c.to_builtin_tensor %2260 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16>
%c1_2463 = arith.constant 1 : index
%dim_2464 = tensor.dim %2313, %c1_2463 : tensor<1x?x32x100xf16>
%2314 = flow.tensor.bitcast %2313 : tensor<1x?x32x100xf16>{%dim_2464} -> tensor<1x?x32x50xcomplex<f16>>{%dim_2464}
%2315 = torch_c.from_builtin_tensor %2314 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>>
torch.bind_symbolic_shape %2315, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>>
%2316 = torch.aten.mul.Tensor %2315, %2312 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>>
torch.bind_symbolic_shape %2316, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>>
%2317 = torch_c.to_builtin_tensor %2316 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>>
%c1_2465 = arith.constant 1 : index
%dim_2466 = tensor.dim %2317, %c1_2465 : tensor<1x?x32x50xcomplex<f32>>
%2318 = flow.tensor.bitcast %2317 : tensor<1x?x32x50xcomplex<f32>>{%dim_2466} -> tensor<1x?x32x100xf32>{%dim_2466}
%2319 = torch_c.from_builtin_tensor %2318 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32>
torch.bind_symbolic_shape %2319, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32>
%int5_2467 = torch.constant.int 5
%2320 = torch.prims.convert_element_type %2319, %int5_2467 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2320, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
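// Paged KV-cache update: page slots appear to be computed as %arg2 * 52 + 20 (26 layers x 2
// entries per layer; slots 20 and 21 would hold this block's K and V). K and V are reshaped into
// 16-token pages of [16,32,100], concatenated with their slot indices, and scattered into the
// flat [?, 26*2*16*32*100 = 2662400] cache via index_put.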
%int52_2468 = torch.constant.int 52
%2321 = torch.aten.mul.Scalar %arg2, %int52_2468 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %2321, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
%int20 = torch.constant.int 20
%int1_2469 = torch.constant.int 1
%2322 = torch.aten.add.Scalar %2321, %int20, %int1_2469 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %2322, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
%int1_2470 = torch.constant.int 1
%int16_2471 = torch.constant.int 16
%int32_2472 = torch.constant.int 32
%int100_2473 = torch.constant.int 100
%2323 = torch.prim.ListConstruct %int1_2470, %368, %int16_2471, %int32_2472, %int100_2473 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2324 = torch.aten.view %2320, %2323 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16>
torch.bind_symbolic_shape %2324, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16>
%int16_2474 = torch.constant.int 16
%int32_2475 = torch.constant.int 32
%int100_2476 = torch.constant.int 100
%2325 = torch.prim.ListConstruct %368, %int16_2474, %int32_2475, %int100_2476 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2326 = torch.aten.view %2324, %2325 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2326, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%2327 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int>
%2328 = torch.aten.view %2322, %2327 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %2328, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64>
%int1_2477 = torch.constant.int 1
%int16_2478 = torch.constant.int 16
%int32_2479 = torch.constant.int 32
%int100_2480 = torch.constant.int 100
%2329 = torch.prim.ListConstruct %int1_2477, %368, %int16_2478, %int32_2479, %int100_2480 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2330 = torch.aten.view %2262, %2329 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16>
torch.bind_symbolic_shape %2330, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16>
%int16_2481 = torch.constant.int 16
%int32_2482 = torch.constant.int 32
%int100_2483 = torch.constant.int 100
%2331 = torch.prim.ListConstruct %368, %int16_2481, %int32_2482, %int100_2483 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2332 = torch.aten.view %2330, %2331 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2332, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%int1_2484 = torch.constant.int 1
%int1_2485 = torch.constant.int 1
%2333 = torch.aten.add.Scalar %2322, %int1_2484, %int1_2485 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %2333, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
%2334 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int>
%2335 = torch.aten.view %2333, %2334 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %2335, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64>
%2336 = torch.prim.ListConstruct %2328, %2335 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor>
%int0_2486 = torch.constant.int 0
%2337 = torch.aten.cat %2336, %int0_2486 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %2337, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64>
%2338 = torch.prim.ListConstruct %2326, %2332 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor>
%int0_2487 = torch.constant.int 0
%2339 = torch.aten.cat %2338, %int0_2487 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2339, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%int26_2488 = torch.constant.int 26
%int2_2489 = torch.constant.int 2
%int16_2490 = torch.constant.int 16
%int32_2491 = torch.constant.int 32
%int100_2492 = torch.constant.int 100
%2340 = torch.prim.ListConstruct %359, %int26_2488, %int2_2489, %int16_2490, %int32_2491, %int100_2492 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2341 = torch.aten.view %2155, %2340 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16>
torch.bind_symbolic_shape %2341, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16>
%int26_2493 = torch.constant.int 26
%2342 = torch.aten.mul.int %359, %int26_2493 : !torch.int, !torch.int -> !torch.int
%int2_2494 = torch.constant.int 2
%2343 = torch.aten.mul.int %2342, %int2_2494 : !torch.int, !torch.int -> !torch.int
%int16_2495 = torch.constant.int 16
%int32_2496 = torch.constant.int 32
%int100_2497 = torch.constant.int 100
%2344 = torch.prim.ListConstruct %2343, %int16_2495, %int32_2496, %int100_2497 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2345 = torch.aten.view %2341, %2344 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2345, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%2346 = torch.prim.ListConstruct %2337 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>>
%false_2498 = torch.constant.bool false
%2347 = torch.aten.index_put %2345, %2346, %2339, %false_2498 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2347, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%int26_2499 = torch.constant.int 26
%int2_2500 = torch.constant.int 2
%int16_2501 = torch.constant.int 16
%int32_2502 = torch.constant.int 32
%int100_2503 = torch.constant.int 100
%2348 = torch.prim.ListConstruct %359, %int26_2499, %int2_2500, %int16_2501, %int32_2502, %int100_2503 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2349 = torch.aten.view %2347, %2348 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16>
torch.bind_symbolic_shape %2349, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16>
%int2662400_2504 = torch.constant.int 2662400
%2350 = torch.prim.ListConstruct %359, %int2662400_2504 : (!torch.int, !torch.int) -> !torch.list<int>
%2351 = torch.aten.view %2349, %2350 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16>
torch.bind_symbolic_shape %2351, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16>
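// Attention for this block: transpose Q, K, V to [1,32,seq,100], compute Q·K^T as a batched
// matmul, scale by sqrt(100) = 10, add the mask %266, softmax in f32, multiply by V, merge heads,
// project through %95, and add the residual.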
%int1_2505 = torch.constant.int 1
%int2_2506 = torch.constant.int 2
%2352 = torch.aten.transpose.int %2291, %int1_2505, %int2_2506 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2352, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_2507 = torch.constant.int 1
%int2_2508 = torch.constant.int 2
%2353 = torch.aten.transpose.int %2320, %int1_2507, %int2_2508 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2353, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_2509 = torch.constant.int 1
%int2_2510 = torch.constant.int 2
%2354 = torch.aten.transpose.int %2262, %int1_2509, %int2_2510 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2354, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int2_2511 = torch.constant.int 2
%int3_2512 = torch.constant.int 3
%2355 = torch.aten.transpose.int %2353, %int2_2511, %int3_2512 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16>
torch.bind_symbolic_shape %2355, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16>
%int1_2513 = torch.constant.int 1
%int32_2514 = torch.constant.int 32
%int100_2515 = torch.constant.int 100
%2356 = torch.prim.ListConstruct %int1_2513, %int32_2514, %2277, %int100_2515 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_2516 = torch.constant.bool false
%2357 = torch.aten.expand %2352, %2356, %false_2516 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2357, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int32_2517 = torch.constant.int 32
%int100_2518 = torch.constant.int 100
%2358 = torch.prim.ListConstruct %int32_2517, %2277, %int100_2518 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2359 = torch.aten.view %2357, %2358 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %2359, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%int1_2519 = torch.constant.int 1
%int32_2520 = torch.constant.int 32
%int100_2521 = torch.constant.int 100
%2360 = torch.prim.ListConstruct %int1_2519, %int32_2520, %int100_2521, %2306 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_2522 = torch.constant.bool false
%2361 = torch.aten.expand %2355, %2360, %false_2522 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16>
torch.bind_symbolic_shape %2361, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16>
%int32_2523 = torch.constant.int 32
%int100_2524 = torch.constant.int 100
%2362 = torch.prim.ListConstruct %int32_2523, %int100_2524, %2306 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2363 = torch.aten.view %2361, %2362 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16>
torch.bind_symbolic_shape %2363, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16>
%2364 = torch.aten.bmm %2359, %2363 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16>
torch.bind_symbolic_shape %2364, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16>
%int1_2525 = torch.constant.int 1
%int32_2526 = torch.constant.int 32
%2365 = torch.prim.ListConstruct %int1_2525, %int32_2526, %2277, %2306 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2366 = torch.aten.view %2364, %2365 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2366, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%float1.000000e01_2527 = torch.constant.float 1.000000e+01
%2367 = torch.aten.div.Scalar %2366, %float1.000000e01_2527 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2367, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int1_2528 = torch.constant.int 1
%2368 = torch.aten.add.Tensor %2367, %266, %int1_2528 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2368, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int6_2529 = torch.constant.int 6
%2369 = torch.prims.convert_element_type %2368, %int6_2529 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %2369, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int-1_2530 = torch.constant.int -1
%false_2531 = torch.constant.bool false
%2370 = torch.aten._softmax %2369, %int-1_2530, %false_2531 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %2370, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int5_2532 = torch.constant.int 5
%2371 = torch.prims.convert_element_type %2370, %int5_2532 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2371, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int1_2533 = torch.constant.int 1
%int32_2534 = torch.constant.int 32
%2372 = torch.prim.ListConstruct %int1_2533, %int32_2534, %2277, %2306 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_2535 = torch.constant.bool false
%2373 = torch.aten.expand %2371, %2372, %false_2535 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2373, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int32_2536 = torch.constant.int 32
%2374 = torch.prim.ListConstruct %int32_2536, %2277, %2306 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2375 = torch.aten.view %2373, %2374 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16>
torch.bind_symbolic_shape %2375, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16>
%int1_2537 = torch.constant.int 1
%2376 = torch.aten.size.int %2256, %int1_2537 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int1_2538 = torch.constant.int 1
%int32_2539 = torch.constant.int 32
%int100_2540 = torch.constant.int 100
%2377 = torch.prim.ListConstruct %int1_2538, %int32_2539, %2376, %int100_2540 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_2541 = torch.constant.bool false
%2378 = torch.aten.expand %2354, %2377, %false_2541 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2378, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int32_2542 = torch.constant.int 32
%int100_2543 = torch.constant.int 100
%2379 = torch.prim.ListConstruct %int32_2542, %2376, %int100_2543 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2380 = torch.aten.view %2378, %2379 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %2380, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%2381 = torch.aten.bmm %2375, %2380 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %2381, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%int1_2544 = torch.constant.int 1
%int32_2545 = torch.constant.int 32
%int100_2546 = torch.constant.int 100
%2382 = torch.prim.ListConstruct %int1_2544, %int32_2545, %2277, %int100_2546 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2383 = torch.aten.view %2381, %2382 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2383, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_2547 = torch.constant.int 1
%int2_2548 = torch.constant.int 2
%2384 = torch.aten.transpose.int %2383, %int1_2547, %int2_2548 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2384, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int0_2549 = torch.constant.int 0
%2385 = torch.aten.clone %2384, %int0_2549 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2385, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_2550 = torch.constant.int 1
%int3200_2551 = torch.constant.int 3200
%2386 = torch.prim.ListConstruct %int1_2550, %2277, %int3200_2551 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2387 = torch.aten._unsafe_view %2385, %2386 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2387, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_2552 = torch.constant.int -2
%int-1_2553 = torch.constant.int -1
%2388 = torch.aten.transpose.int %95, %int-2_2552, %int-1_2553 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_2554 = torch.constant.int 3200
%2389 = torch.prim.ListConstruct %2277, %int3200_2554 : (!torch.int, !torch.int) -> !torch.list<int>
%2390 = torch.aten.view %2387, %2389 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2390, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%2391 = torch.aten.mm %2390, %2388 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2391, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_2555 = torch.constant.int 1
%int3200_2556 = torch.constant.int 3200
%2392 = torch.prim.ListConstruct %int1_2555, %2277, %int3200_2556 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2393 = torch.aten.view %2391, %2392 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2393, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_2557 = torch.constant.int 1
%2394 = torch.aten.add.Tensor %2229, %2393, %int1_2557 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2394, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
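// Post-attention RMSNorm with weight %96 (presumably this block's ffn_norm.weight).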
%int6_2558 = torch.constant.int 6 | |
%2395 = torch.prims.convert_element_type %2394, %int6_2558 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2395, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_2559 = torch.constant.int 2 | |
%2396 = torch.aten.pow.Tensor_Scalar %2395, %int2_2559 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2396, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_2560 = torch.constant.int -1 | |
%2397 = torch.prim.ListConstruct %int-1_2560 : (!torch.int) -> !torch.list<int> | |
%true_2561 = torch.constant.bool true | |
%none_2562 = torch.constant.none | |
%2398 = torch.aten.mean.dim %2396, %2397, %true_2561, %none_2562 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2398, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_2563 = torch.constant.float 9.9999999747524271E-7 | |
%int1_2564 = torch.constant.int 1 | |
%2399 = torch.aten.add.Scalar %2398, %float9.999990e-07_2563, %int1_2564 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2399, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2400 = torch.aten.rsqrt %2399 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2400, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2401 = torch.aten.mul.Tensor %2395, %2400 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2401, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%2402 = torch.aten.mul.Tensor %96, %2401 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2402, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_2565 = torch.constant.int 5 | |
%2403 = torch.prims.convert_element_type %2402, %int5_2565 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2403, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
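// SwiGLU feed-forward: gate = silu(x @ transpose(%97)) and up = x @ transpose(%98),
// both expanding 3200 -> 8640; the elementwise product gate * up is projected
// back to 3200 through %99 and added onto the residual stream.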
%int-2_2566 = torch.constant.int -2 | |
%int-1_2567 = torch.constant.int -1 | |
%2404 = torch.aten.transpose.int %97, %int-2_2566, %int-1_2567 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_2568 = torch.constant.int 3200 | |
%2405 = torch.prim.ListConstruct %240, %int3200_2568 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2406 = torch.aten.view %2403, %2405 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2406, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2407 = torch.aten.mm %2406, %2404 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2407, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_2569 = torch.constant.int 1 | |
%int8640_2570 = torch.constant.int 8640 | |
%2408 = torch.prim.ListConstruct %int1_2569, %240, %int8640_2570 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2409 = torch.aten.view %2407, %2408 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2409, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%2410 = torch.aten.silu %2409 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2410, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_2571 = torch.constant.int -2 | |
%int-1_2572 = torch.constant.int -1 | |
%2411 = torch.aten.transpose.int %98, %int-2_2571, %int-1_2572 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_2573 = torch.constant.int 3200 | |
%2412 = torch.prim.ListConstruct %240, %int3200_2573 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2413 = torch.aten.view %2403, %2412 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2413, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2414 = torch.aten.mm %2413, %2411 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2414, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_2574 = torch.constant.int 1 | |
%int8640_2575 = torch.constant.int 8640 | |
%2415 = torch.prim.ListConstruct %int1_2574, %240, %int8640_2575 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2416 = torch.aten.view %2414, %2415 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2416, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%2417 = torch.aten.mul.Tensor %2410, %2416 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2417, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_2576 = torch.constant.int -2 | |
%int-1_2577 = torch.constant.int -1 | |
%2418 = torch.aten.transpose.int %99, %int-2_2576, %int-1_2577 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_2578 = torch.constant.int 1 | |
%2419 = torch.aten.size.int %2409, %int1_2578 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_2579 = torch.constant.int 8640 | |
%2420 = torch.prim.ListConstruct %2419, %int8640_2579 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2421 = torch.aten.view %2417, %2420 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2421, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%2422 = torch.aten.mm %2421, %2418 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2422, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2580 = torch.constant.int 1 | |
%int3200_2581 = torch.constant.int 3200 | |
%2423 = torch.prim.ListConstruct %int1_2580, %2419, %int3200_2581 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2424 = torch.aten.view %2422, %2423 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2424, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_2582 = torch.constant.int 1 | |
%2425 = torch.aten.add.Tensor %2394, %2424, %int1_2582 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2425, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
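// Next decoder layer: RMSNorm over the updated residual stream, scaled by the
// attention-norm weight (%100), before the Q/K/V projections.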
%int6_2583 = torch.constant.int 6 | |
%2426 = torch.prims.convert_element_type %2425, %int6_2583 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2426, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_2584 = torch.constant.int 2 | |
%2427 = torch.aten.pow.Tensor_Scalar %2426, %int2_2584 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2427, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_2585 = torch.constant.int -1 | |
%2428 = torch.prim.ListConstruct %int-1_2585 : (!torch.int) -> !torch.list<int> | |
%true_2586 = torch.constant.bool true | |
%none_2587 = torch.constant.none | |
%2429 = torch.aten.mean.dim %2427, %2428, %true_2586, %none_2587 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2429, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_2588 = torch.constant.float 9.9999999747524271E-7 | |
%int1_2589 = torch.constant.int 1 | |
%2430 = torch.aten.add.Scalar %2429, %float9.999990e-07_2588, %int1_2589 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2430, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2431 = torch.aten.rsqrt %2430 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2431, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2432 = torch.aten.mul.Tensor %2426, %2431 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2432, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%2433 = torch.aten.mul.Tensor %100, %2432 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2433, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_2590 = torch.constant.int 5 | |
%2434 = torch.prims.convert_element_type %2433, %int5_2590 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2434, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
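// Self-attention Q/K/V projections: three 3200x3200 matmuls against the
// transposed weights %101 (Q), %102 (K), and %103 (V), each flattening the
// [1, seq, 3200] activation to [seq, 3200] for the mm and restoring it after.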
%int-2_2591 = torch.constant.int -2 | |
%int-1_2592 = torch.constant.int -1 | |
%2435 = torch.aten.transpose.int %101, %int-2_2591, %int-1_2592 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2593 = torch.constant.int 3200 | |
%2436 = torch.prim.ListConstruct %240, %int3200_2593 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2437 = torch.aten.view %2434, %2436 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2437, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2438 = torch.aten.mm %2437, %2435 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2438, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2594 = torch.constant.int 1 | |
%int3200_2595 = torch.constant.int 3200 | |
%2439 = torch.prim.ListConstruct %int1_2594, %240, %int3200_2595 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2440 = torch.aten.view %2438, %2439 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2440, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_2596 = torch.constant.int -2 | |
%int-1_2597 = torch.constant.int -1 | |
%2441 = torch.aten.transpose.int %102, %int-2_2596, %int-1_2597 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2598 = torch.constant.int 3200 | |
%2442 = torch.prim.ListConstruct %240, %int3200_2598 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2443 = torch.aten.view %2434, %2442 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2443, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2444 = torch.aten.mm %2443, %2441 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2444, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2599 = torch.constant.int 1 | |
%int3200_2600 = torch.constant.int 3200 | |
%2445 = torch.prim.ListConstruct %int1_2599, %240, %int3200_2600 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2446 = torch.aten.view %2444, %2445 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2446, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_2601 = torch.constant.int -2 | |
%int-1_2602 = torch.constant.int -1 | |
%2447 = torch.aten.transpose.int %103, %int-2_2601, %int-1_2602 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2603 = torch.constant.int 3200 | |
%2448 = torch.prim.ListConstruct %240, %int3200_2603 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2449 = torch.aten.view %2434, %2448 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2449, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2450 = torch.aten.mm %2449, %2447 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2450, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2604 = torch.constant.int 1 | |
%int3200_2605 = torch.constant.int 3200 | |
%2451 = torch.prim.ListConstruct %int1_2604, %240, %int3200_2605 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2452 = torch.aten.view %2450, %2451 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2452, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
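// Split each 3200-wide projection into 32 attention heads of dim 100
// ([1, seq, 3200] -> [1, seq, 32, 100]). Throughout, the symbolic dim s0 in
// the bind_symbolic_shape maps counts 16-token cache pages, so seq = s0 * 16.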
%int1_2606 = torch.constant.int 1 | |
%int32_2607 = torch.constant.int 32 | |
%int100_2608 = torch.constant.int 100 | |
%2453 = torch.prim.ListConstruct %int1_2606, %240, %int32_2607, %int100_2608 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2454 = torch.aten.view %2440, %2453 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2454, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_2609 = torch.constant.int 1 | |
%int32_2610 = torch.constant.int 32 | |
%int100_2611 = torch.constant.int 100 | |
%2455 = torch.prim.ListConstruct %int1_2609, %240, %int32_2610, %int100_2611 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2456 = torch.aten.view %2446, %2455 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2456, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_2612 = torch.constant.int 1 | |
%int32_2613 = torch.constant.int 32 | |
%int100_2614 = torch.constant.int 100 | |
%2457 = torch.prim.ListConstruct %int1_2612, %240, %int32_2613, %int100_2614 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2458 = torch.aten.view %2452, %2457 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2458, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
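// Build the rotary-embedding (RoPE) table: positions 0..2047 paired with 50
// inverse frequencies 1 / 10000^(2i/100), combined as cos(pos*f) + i*sin(pos*f)
// into a [2048, 50] complex<f32> tensor, then sliced to the current seq length.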
%int2048_2615 = torch.constant.int 2048 | |
%none_2616 = torch.constant.none | |
%none_2617 = torch.constant.none | |
%cpu_2618 = torch.constant.device "cpu" | |
%false_2619 = torch.constant.bool false | |
%2459 = torch.aten.arange %int2048_2615, %none_2616, %none_2617, %cpu_2618, %false_2619 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_2620 = torch.constant.int 0 | |
%int100_2621 = torch.constant.int 100 | |
%int2_2622 = torch.constant.int 2 | |
%none_2623 = torch.constant.none | |
%none_2624 = torch.constant.none | |
%cpu_2625 = torch.constant.device "cpu" | |
%false_2626 = torch.constant.bool false | |
%2460 = torch.aten.arange.start_step %int0_2620, %int100_2621, %int2_2622, %none_2623, %none_2624, %cpu_2625, %false_2626 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_2627 = torch.constant.int 0 | |
%int0_2628 = torch.constant.int 0 | |
%int50_2629 = torch.constant.int 50 | |
%int1_2630 = torch.constant.int 1 | |
%2461 = torch.aten.slice.Tensor %2460, %int0_2627, %int0_2628, %int50_2629, %int1_2630 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_2631 = torch.constant.int 6 | |
%2462 = torch.prims.convert_element_type %2461, %int6_2631 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_2632 = torch.constant.int 100 | |
%2463 = torch.aten.div.Scalar %2462, %int100_2632 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_2633 = torch.constant.float 1.000000e+04 | |
%2464 = torch.aten.pow.Scalar %float1.000000e04_2633, %2463 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%2465 = torch.aten.reciprocal %2464 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_2634 = torch.constant.float 1.000000e+00 | |
%2466 = torch.aten.mul.Scalar %2465, %float1.000000e00_2634 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_2635 = torch.constant.int 2048 | |
%int1_2636 = torch.constant.int 1 | |
%2467 = torch.prim.ListConstruct %int2048_2635, %int1_2636 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2468 = torch.aten.view %2459, %2467 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%2469 = torch.aten.mul.Tensor %2468, %2466 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2470 = torch.aten.cos %2469 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2471 = torch.aten.sin %2469 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2472 = torch.aten.complex %2470, %2471 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_2637 = torch.constant.int 1 | |
%2473 = torch.aten.size.int %2440, %int1_2637 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_2638 = torch.constant.int 0 | |
%2474 = torch.aten.add.int %int0_2638, %2473 : !torch.int, !torch.int -> !torch.int | |
%int0_2639 = torch.constant.int 0 | |
%int0_2640 = torch.constant.int 0 | |
%int1_2641 = torch.constant.int 1 | |
%2475 = torch.aten.slice.Tensor %2472, %int0_2639, %int0_2640, %2474, %int1_2641 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2475, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_2642 = torch.constant.int 1 | |
%int0_2643 = torch.constant.int 0 | |
%int9223372036854775807_2644 = torch.constant.int 9223372036854775807 | |
%int1_2645 = torch.constant.int 1 | |
%2476 = torch.aten.slice.Tensor %2475, %int1_2642, %int0_2643, %int9223372036854775807_2644, %int1_2645 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2476, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_2646 = torch.constant.int 0 | |
%2477 = torch.aten.unsqueeze %2476, %int0_2646 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %2477, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_2647 = torch.constant.int 2 | |
%2478 = torch.aten.unsqueeze %2477, %int2_2647 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2478, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_2648 = torch.constant.int 3 | |
%int0_2649 = torch.constant.int 0 | |
%int9223372036854775807_2650 = torch.constant.int 9223372036854775807 | |
%int1_2651 = torch.constant.int 1 | |
%2479 = torch.aten.slice.Tensor %2478, %int3_2648, %int0_2649, %int9223372036854775807_2650, %int1_2651 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2479, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
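// Apply RoPE to Q: flow.tensor.bitcast reinterprets adjacent f16 pairs as
// complex<f16> ([1,seq,32,100] -> [1,seq,32,50]), a complex multiply rotates
// them by the frequency table, and a second bitcast plus f16 downcast restores
// the real [1,seq,32,100] layout.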
%2480 = torch_c.to_builtin_tensor %2454 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_2652 = arith.constant 1 : index | |
%dim_2653 = tensor.dim %2480, %c1_2652 : tensor<1x?x32x100xf16> | |
%2481 = flow.tensor.bitcast %2480 : tensor<1x?x32x100xf16>{%dim_2653} -> tensor<1x?x32x50xcomplex<f16>>{%dim_2653} | |
%2482 = torch_c.from_builtin_tensor %2481 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %2482, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%2483 = torch.aten.mul.Tensor %2482, %2479 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %2483, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%2484 = torch_c.to_builtin_tensor %2483 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_2654 = arith.constant 1 : index | |
%dim_2655 = tensor.dim %2484, %c1_2654 : tensor<1x?x32x50xcomplex<f32>> | |
%2485 = flow.tensor.bitcast %2484 : tensor<1x?x32x50xcomplex<f32>>{%dim_2655} -> tensor<1x?x32x100xf32>{%dim_2655} | |
%2486 = torch_c.from_builtin_tensor %2485 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %2486, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_2656 = torch.constant.int 5 | |
%2487 = torch.prims.convert_element_type %2486, %int5_2656 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2487, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
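// The identical RoPE table is rebuilt from scratch and applied to K in the
// same bitcast/complex-multiply fashion; the exporter evidently emits the
// duplicated arange/pow/cos/sin sequence rather than reusing the one above.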
%int2048_2657 = torch.constant.int 2048 | |
%none_2658 = torch.constant.none | |
%none_2659 = torch.constant.none | |
%cpu_2660 = torch.constant.device "cpu" | |
%false_2661 = torch.constant.bool false | |
%2488 = torch.aten.arange %int2048_2657, %none_2658, %none_2659, %cpu_2660, %false_2661 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_2662 = torch.constant.int 0 | |
%int100_2663 = torch.constant.int 100 | |
%int2_2664 = torch.constant.int 2 | |
%none_2665 = torch.constant.none | |
%none_2666 = torch.constant.none | |
%cpu_2667 = torch.constant.device "cpu" | |
%false_2668 = torch.constant.bool false | |
%2489 = torch.aten.arange.start_step %int0_2662, %int100_2663, %int2_2664, %none_2665, %none_2666, %cpu_2667, %false_2668 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_2669 = torch.constant.int 0 | |
%int0_2670 = torch.constant.int 0 | |
%int50_2671 = torch.constant.int 50 | |
%int1_2672 = torch.constant.int 1 | |
%2490 = torch.aten.slice.Tensor %2489, %int0_2669, %int0_2670, %int50_2671, %int1_2672 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_2673 = torch.constant.int 6 | |
%2491 = torch.prims.convert_element_type %2490, %int6_2673 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_2674 = torch.constant.int 100 | |
%2492 = torch.aten.div.Scalar %2491, %int100_2674 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_2675 = torch.constant.float 1.000000e+04 | |
%2493 = torch.aten.pow.Scalar %float1.000000e04_2675, %2492 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%2494 = torch.aten.reciprocal %2493 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_2676 = torch.constant.float 1.000000e+00 | |
%2495 = torch.aten.mul.Scalar %2494, %float1.000000e00_2676 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_2677 = torch.constant.int 2048 | |
%int1_2678 = torch.constant.int 1 | |
%2496 = torch.prim.ListConstruct %int2048_2677, %int1_2678 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2497 = torch.aten.view %2488, %2496 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%2498 = torch.aten.mul.Tensor %2497, %2495 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2499 = torch.aten.cos %2498 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2500 = torch.aten.sin %2498 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2501 = torch.aten.complex %2499, %2500 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_2679 = torch.constant.int 1 | |
%2502 = torch.aten.size.int %2446, %int1_2679 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_2680 = torch.constant.int 0 | |
%2503 = torch.aten.add.int %int0_2680, %2502 : !torch.int, !torch.int -> !torch.int | |
%int0_2681 = torch.constant.int 0 | |
%int0_2682 = torch.constant.int 0 | |
%int1_2683 = torch.constant.int 1 | |
%2504 = torch.aten.slice.Tensor %2501, %int0_2681, %int0_2682, %2503, %int1_2683 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2504, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_2684 = torch.constant.int 1 | |
%int0_2685 = torch.constant.int 0 | |
%int9223372036854775807_2686 = torch.constant.int 9223372036854775807 | |
%int1_2687 = torch.constant.int 1 | |
%2505 = torch.aten.slice.Tensor %2504, %int1_2684, %int0_2685, %int9223372036854775807_2686, %int1_2687 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2505, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_2688 = torch.constant.int 0 | |
%2506 = torch.aten.unsqueeze %2505, %int0_2688 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %2506, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_2689 = torch.constant.int 2 | |
%2507 = torch.aten.unsqueeze %2506, %int2_2689 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2507, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_2690 = torch.constant.int 3 | |
%int0_2691 = torch.constant.int 0 | |
%int9223372036854775807_2692 = torch.constant.int 9223372036854775807 | |
%int1_2693 = torch.constant.int 1 | |
%2508 = torch.aten.slice.Tensor %2507, %int3_2690, %int0_2691, %int9223372036854775807_2692, %int1_2693 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2508, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%2509 = torch_c.to_builtin_tensor %2456 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_2694 = arith.constant 1 : index | |
%dim_2695 = tensor.dim %2509, %c1_2694 : tensor<1x?x32x100xf16> | |
%2510 = flow.tensor.bitcast %2509 : tensor<1x?x32x100xf16>{%dim_2695} -> tensor<1x?x32x50xcomplex<f16>>{%dim_2695} | |
%2511 = torch_c.from_builtin_tensor %2510 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %2511, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%2512 = torch.aten.mul.Tensor %2511, %2508 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %2512, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%2513 = torch_c.to_builtin_tensor %2512 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_2696 = arith.constant 1 : index | |
%dim_2697 = tensor.dim %2513, %c1_2696 : tensor<1x?x32x50xcomplex<f32>> | |
%2514 = flow.tensor.bitcast %2513 : tensor<1x?x32x50xcomplex<f32>>{%dim_2697} -> tensor<1x?x32x100xf32>{%dim_2697} | |
%2515 = torch_c.from_builtin_tensor %2514 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %2515, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_2698 = torch.constant.int 5 | |
%2516 = torch.prims.convert_element_type %2515, %int5_2698 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2516, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
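// Paged KV-cache write: %arg2 carries page indices, scaled by 52 = 26 layers
// x 2 entries (K, V); offsets +22 and +23 select one layer's K and V slots
// (presumably layer 11, counting from 0). K and V are reshaped to
// [pages, 16, 32, 100], their page indices concatenated, and both scattered
// into the flat [?, 2662400] cache via index_put (2662400 = 26*2*16*32*100).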
%int52_2699 = torch.constant.int 52 | |
%2517 = torch.aten.mul.Scalar %arg2, %int52_2699 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %2517, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int22 = torch.constant.int 22 | |
%int1_2700 = torch.constant.int 1 | |
%2518 = torch.aten.add.Scalar %2517, %int22, %int1_2700 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %2518, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_2701 = torch.constant.int 1 | |
%int16_2702 = torch.constant.int 16 | |
%int32_2703 = torch.constant.int 32 | |
%int100_2704 = torch.constant.int 100 | |
%2519 = torch.prim.ListConstruct %int1_2701, %368, %int16_2702, %int32_2703, %int100_2704 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2520 = torch.aten.view %2516, %2519 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %2520, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_2705 = torch.constant.int 16 | |
%int32_2706 = torch.constant.int 32 | |
%int100_2707 = torch.constant.int 100 | |
%2521 = torch.prim.ListConstruct %368, %int16_2705, %int32_2706, %int100_2707 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2522 = torch.aten.view %2520, %2521 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2522, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%2523 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%2524 = torch.aten.view %2518, %2523 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %2524, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_2708 = torch.constant.int 1 | |
%int16_2709 = torch.constant.int 16 | |
%int32_2710 = torch.constant.int 32 | |
%int100_2711 = torch.constant.int 100 | |
%2525 = torch.prim.ListConstruct %int1_2708, %368, %int16_2709, %int32_2710, %int100_2711 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2526 = torch.aten.view %2458, %2525 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %2526, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_2712 = torch.constant.int 16 | |
%int32_2713 = torch.constant.int 32 | |
%int100_2714 = torch.constant.int 100 | |
%2527 = torch.prim.ListConstruct %368, %int16_2712, %int32_2713, %int100_2714 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2528 = torch.aten.view %2526, %2527 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2528, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_2715 = torch.constant.int 1 | |
%int1_2716 = torch.constant.int 1 | |
%2529 = torch.aten.add.Scalar %2518, %int1_2715, %int1_2716 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %2529, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%2530 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%2531 = torch.aten.view %2529, %2530 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %2531, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%2532 = torch.prim.ListConstruct %2524, %2531 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_2717 = torch.constant.int 0 | |
%2533 = torch.aten.cat %2532, %int0_2717 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %2533, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%2534 = torch.prim.ListConstruct %2522, %2528 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_2718 = torch.constant.int 0 | |
%2535 = torch.aten.cat %2534, %int0_2718 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2535, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_2719 = torch.constant.int 26 | |
%int2_2720 = torch.constant.int 2 | |
%int16_2721 = torch.constant.int 16 | |
%int32_2722 = torch.constant.int 32 | |
%int100_2723 = torch.constant.int 100 | |
%2536 = torch.prim.ListConstruct %359, %int26_2719, %int2_2720, %int16_2721, %int32_2722, %int100_2723 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2537 = torch.aten.view %2351, %2536 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %2537, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_2724 = torch.constant.int 26 | |
%2538 = torch.aten.mul.int %359, %int26_2724 : !torch.int, !torch.int -> !torch.int | |
%int2_2725 = torch.constant.int 2 | |
%2539 = torch.aten.mul.int %2538, %int2_2725 : !torch.int, !torch.int -> !torch.int | |
%int16_2726 = torch.constant.int 16 | |
%int32_2727 = torch.constant.int 32 | |
%int100_2728 = torch.constant.int 100 | |
%2540 = torch.prim.ListConstruct %2539, %int16_2726, %int32_2727, %int100_2728 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2541 = torch.aten.view %2537, %2540 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2541, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%2542 = torch.prim.ListConstruct %2533 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_2729 = torch.constant.bool false | |
%2543 = torch.aten.index_put %2541, %2542, %2535, %false_2729 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2543, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_2730 = torch.constant.int 26 | |
%int2_2731 = torch.constant.int 2 | |
%int16_2732 = torch.constant.int 16 | |
%int32_2733 = torch.constant.int 32 | |
%int100_2734 = torch.constant.int 100 | |
%2544 = torch.prim.ListConstruct %359, %int26_2730, %int2_2731, %int16_2732, %int32_2733, %int100_2734 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2545 = torch.aten.view %2543, %2544 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %2545, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_2735 = torch.constant.int 2662400 | |
%2546 = torch.prim.ListConstruct %359, %int2662400_2735 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2547 = torch.aten.view %2545, %2546 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %2547, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
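// Attention scores: Q, K, V are transposed to [1, 32, seq, 100], scores are
// computed as a per-head batched matmul Q @ K^T scaled by 1/10
// (= 1/sqrt(head_dim 100)), the attention mask (%266) is added, and the
// softmax runs in f32 for stability before returning to f16.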
%int1_2736 = torch.constant.int 1 | |
%int2_2737 = torch.constant.int 2 | |
%2548 = torch.aten.transpose.int %2487, %int1_2736, %int2_2737 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2548, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2738 = torch.constant.int 1 | |
%int2_2739 = torch.constant.int 2 | |
%2549 = torch.aten.transpose.int %2516, %int1_2738, %int2_2739 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2549, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2740 = torch.constant.int 1 | |
%int2_2741 = torch.constant.int 2 | |
%2550 = torch.aten.transpose.int %2458, %int1_2740, %int2_2741 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2550, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_2742 = torch.constant.int 2 | |
%int3_2743 = torch.constant.int 3 | |
%2551 = torch.aten.transpose.int %2549, %int2_2742, %int3_2743 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %2551, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_2744 = torch.constant.int 1 | |
%int32_2745 = torch.constant.int 32 | |
%int100_2746 = torch.constant.int 100 | |
%2552 = torch.prim.ListConstruct %int1_2744, %int32_2745, %2473, %int100_2746 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2747 = torch.constant.bool false | |
%2553 = torch.aten.expand %2548, %2552, %false_2747 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2553, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_2748 = torch.constant.int 32 | |
%int100_2749 = torch.constant.int 100 | |
%2554 = torch.prim.ListConstruct %int32_2748, %2473, %int100_2749 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2555 = torch.aten.view %2553, %2554 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %2555, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_2750 = torch.constant.int 1 | |
%int32_2751 = torch.constant.int 32 | |
%int100_2752 = torch.constant.int 100 | |
%2556 = torch.prim.ListConstruct %int1_2750, %int32_2751, %int100_2752, %2502 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2753 = torch.constant.bool false | |
%2557 = torch.aten.expand %2551, %2556, %false_2753 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %2557, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_2754 = torch.constant.int 32 | |
%int100_2755 = torch.constant.int 100 | |
%2558 = torch.prim.ListConstruct %int32_2754, %int100_2755, %2502 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2559 = torch.aten.view %2557, %2558 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %2559, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%2560 = torch.aten.bmm %2555, %2559 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %2560, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_2756 = torch.constant.int 1 | |
%int32_2757 = torch.constant.int 32 | |
%2561 = torch.prim.ListConstruct %int1_2756, %int32_2757, %2473, %2502 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2562 = torch.aten.view %2560, %2561 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2562, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%float1.000000e01_2758 = torch.constant.float 1.000000e+01 | |
%2563 = torch.aten.div.Scalar %2562, %float1.000000e01_2758 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2563, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_2759 = torch.constant.int 1 | |
%2564 = torch.aten.add.Tensor %2563, %266, %int1_2759 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2564, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_2760 = torch.constant.int 6 | |
%2565 = torch.prims.convert_element_type %2564, %int6_2760 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %2565, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_2761 = torch.constant.int -1 | |
%false_2762 = torch.constant.bool false | |
%2566 = torch.aten._softmax %2565, %int-1_2761, %false_2762 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %2566, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_2763 = torch.constant.int 5 | |
%2567 = torch.prims.convert_element_type %2566, %int5_2763 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2567, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
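// Weighted sum over values: the f16 probabilities are batch-matmul'd with V
// per head, then the heads are transposed and merged back into a single
// [1, seq, 3200] activation.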
%int1_2764 = torch.constant.int 1 | |
%int32_2765 = torch.constant.int 32 | |
%2568 = torch.prim.ListConstruct %int1_2764, %int32_2765, %2473, %2502 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2766 = torch.constant.bool false | |
%2569 = torch.aten.expand %2567, %2568, %false_2766 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2569, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_2767 = torch.constant.int 32 | |
%2570 = torch.prim.ListConstruct %int32_2767, %2473, %2502 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2571 = torch.aten.view %2569, %2570 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %2571, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_2768 = torch.constant.int 1 | |
%2572 = torch.aten.size.int %2452, %int1_2768 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_2769 = torch.constant.int 1 | |
%int32_2770 = torch.constant.int 32 | |
%int100_2771 = torch.constant.int 100 | |
%2573 = torch.prim.ListConstruct %int1_2769, %int32_2770, %2572, %int100_2771 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2772 = torch.constant.bool false | |
%2574 = torch.aten.expand %2550, %2573, %false_2772 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2574, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_2773 = torch.constant.int 32 | |
%int100_2774 = torch.constant.int 100 | |
%2575 = torch.prim.ListConstruct %int32_2773, %2572, %int100_2774 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2576 = torch.aten.view %2574, %2575 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %2576, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%2577 = torch.aten.bmm %2571, %2576 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %2577, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_2775 = torch.constant.int 1 | |
%int32_2776 = torch.constant.int 32 | |
%int100_2777 = torch.constant.int 100 | |
%2578 = torch.prim.ListConstruct %int1_2775, %int32_2776, %2473, %int100_2777 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2579 = torch.aten.view %2577, %2578 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2579, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2778 = torch.constant.int 1 | |
%int2_2779 = torch.constant.int 2 | |
%2580 = torch.aten.transpose.int %2579, %int1_2778, %int2_2779 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2580, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_2780 = torch.constant.int 0 | |
%2581 = torch.aten.clone %2580, %int0_2780 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2581, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_2781 = torch.constant.int 1 | |
%int3200_2782 = torch.constant.int 3200 | |
%2582 = torch.prim.ListConstruct %int1_2781, %2473, %int3200_2782 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2583 = torch.aten._unsafe_view %2581, %2582 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2583, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
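// Attention output projection (%104) followed by the residual add back into
// the stream.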
%int-2_2783 = torch.constant.int -2 | |
%int-1_2784 = torch.constant.int -1 | |
%2584 = torch.aten.transpose.int %104, %int-2_2783, %int-1_2784 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2785 = torch.constant.int 3200 | |
%2585 = torch.prim.ListConstruct %2473, %int3200_2785 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2586 = torch.aten.view %2583, %2585 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2586, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2587 = torch.aten.mm %2586, %2584 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2587, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2786 = torch.constant.int 1 | |
%int3200_2787 = torch.constant.int 3200 | |
%2588 = torch.prim.ListConstruct %int1_2786, %2473, %int3200_2787 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2589 = torch.aten.view %2587, %2588 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2589, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_2788 = torch.constant.int 1 | |
%2590 = torch.aten.add.Tensor %2425, %2589, %int1_2788 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2590, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
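// RMSNorm ahead of this layer's feed-forward block (norm weight %105), using
// the same f32 mean-of-squares / rsqrt sequence as above.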
%int6_2789 = torch.constant.int 6 | |
%2591 = torch.prims.convert_element_type %2590, %int6_2789 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2591, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_2790 = torch.constant.int 2 | |
%2592 = torch.aten.pow.Tensor_Scalar %2591, %int2_2790 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2592, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_2791 = torch.constant.int -1 | |
%2593 = torch.prim.ListConstruct %int-1_2791 : (!torch.int) -> !torch.list<int> | |
%true_2792 = torch.constant.bool true | |
%none_2793 = torch.constant.none | |
%2594 = torch.aten.mean.dim %2592, %2593, %true_2792, %none_2793 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2594, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_2794 = torch.constant.float 9.9999999747524271E-7 | |
%int1_2795 = torch.constant.int 1 | |
%2595 = torch.aten.add.Scalar %2594, %float9.999990e-07_2794, %int1_2795 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2595, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2596 = torch.aten.rsqrt %2595 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2596, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2597 = torch.aten.mul.Tensor %2591, %2596 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2597, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%2598 = torch.aten.mul.Tensor %105, %2597 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2598, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_2796 = torch.constant.int 5 | |
%2599 = torch.prims.convert_element_type %2598, %int5_2796 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2599, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
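// SwiGLU feed-forward for this layer: gate (%106) and up (%107) projections
// to 8640, silu(gate) * up, down projection (%108) back to 3200, residual add.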
%int-2_2797 = torch.constant.int -2 | |
%int-1_2798 = torch.constant.int -1 | |
%2600 = torch.aten.transpose.int %106, %int-2_2797, %int-1_2798 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_2799 = torch.constant.int 3200 | |
%2601 = torch.prim.ListConstruct %240, %int3200_2799 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2602 = torch.aten.view %2599, %2601 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2602, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2603 = torch.aten.mm %2602, %2600 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2603, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_2800 = torch.constant.int 1 | |
%int8640_2801 = torch.constant.int 8640 | |
%2604 = torch.prim.ListConstruct %int1_2800, %240, %int8640_2801 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2605 = torch.aten.view %2603, %2604 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2605, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%2606 = torch.aten.silu %2605 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2606, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_2802 = torch.constant.int -2 | |
%int-1_2803 = torch.constant.int -1 | |
%2607 = torch.aten.transpose.int %107, %int-2_2802, %int-1_2803 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_2804 = torch.constant.int 3200 | |
%2608 = torch.prim.ListConstruct %240, %int3200_2804 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2609 = torch.aten.view %2599, %2608 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2609, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2610 = torch.aten.mm %2609, %2607 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2610, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_2805 = torch.constant.int 1 | |
%int8640_2806 = torch.constant.int 8640 | |
%2611 = torch.prim.ListConstruct %int1_2805, %240, %int8640_2806 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2612 = torch.aten.view %2610, %2611 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2612, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%2613 = torch.aten.mul.Tensor %2606, %2612 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2613, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_2807 = torch.constant.int -2 | |
%int-1_2808 = torch.constant.int -1 | |
%2614 = torch.aten.transpose.int %108, %int-2_2807, %int-1_2808 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_2809 = torch.constant.int 1 | |
%2615 = torch.aten.size.int %2605, %int1_2809 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_2810 = torch.constant.int 8640 | |
%2616 = torch.prim.ListConstruct %2615, %int8640_2810 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2617 = torch.aten.view %2613, %2616 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2617, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%2618 = torch.aten.mm %2617, %2614 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2618, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2811 = torch.constant.int 1 | |
%int3200_2812 = torch.constant.int 3200 | |
%2619 = torch.prim.ListConstruct %int1_2811, %2615, %int3200_2812 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2620 = torch.aten.view %2618, %2619 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2620, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_2813 = torch.constant.int 1 | |
%2621 = torch.aten.add.Tensor %2590, %2620, %int1_2813 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2621, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
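// The following decoder layer begins: attention RMSNorm (weight %109) over
// the residual stream.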
%int6_2814 = torch.constant.int 6 | |
%2622 = torch.prims.convert_element_type %2621, %int6_2814 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2622, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_2815 = torch.constant.int 2 | |
%2623 = torch.aten.pow.Tensor_Scalar %2622, %int2_2815 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2623, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_2816 = torch.constant.int -1 | |
%2624 = torch.prim.ListConstruct %int-1_2816 : (!torch.int) -> !torch.list<int> | |
%true_2817 = torch.constant.bool true | |
%none_2818 = torch.constant.none | |
%2625 = torch.aten.mean.dim %2623, %2624, %true_2817, %none_2818 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2625, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_2819 = torch.constant.float 9.9999999747524271E-7 | |
%int1_2820 = torch.constant.int 1 | |
%2626 = torch.aten.add.Scalar %2625, %float9.999990e-07_2819, %int1_2820 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2626, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2627 = torch.aten.rsqrt %2626 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2627, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2628 = torch.aten.mul.Tensor %2622, %2627 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2628, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%2629 = torch.aten.mul.Tensor %109, %2628 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2629, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_2821 = torch.constant.int 5 | |
%2630 = torch.prims.convert_element_type %2629, %int5_2821 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2630, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
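// Q/K/V projections for this layer against the transposed weights %110 (Q),
// %111 (K), and %112 (V).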
%int-2_2822 = torch.constant.int -2 | |
%int-1_2823 = torch.constant.int -1 | |
%2631 = torch.aten.transpose.int %110, %int-2_2822, %int-1_2823 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2824 = torch.constant.int 3200 | |
%2632 = torch.prim.ListConstruct %240, %int3200_2824 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2633 = torch.aten.view %2630, %2632 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2633, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2634 = torch.aten.mm %2633, %2631 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2634, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2825 = torch.constant.int 1 | |
%int3200_2826 = torch.constant.int 3200 | |
%2635 = torch.prim.ListConstruct %int1_2825, %240, %int3200_2826 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2636 = torch.aten.view %2634, %2635 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2636, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_2827 = torch.constant.int -2 | |
%int-1_2828 = torch.constant.int -1 | |
%2637 = torch.aten.transpose.int %111, %int-2_2827, %int-1_2828 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2829 = torch.constant.int 3200 | |
%2638 = torch.prim.ListConstruct %240, %int3200_2829 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2639 = torch.aten.view %2630, %2638 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2639, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2640 = torch.aten.mm %2639, %2637 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2640, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2830 = torch.constant.int 1 | |
%int3200_2831 = torch.constant.int 3200 | |
%2641 = torch.prim.ListConstruct %int1_2830, %240, %int3200_2831 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2642 = torch.aten.view %2640, %2641 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2642, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_2832 = torch.constant.int -2 | |
%int-1_2833 = torch.constant.int -1 | |
%2643 = torch.aten.transpose.int %112, %int-2_2832, %int-1_2833 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_2834 = torch.constant.int 3200 | |
%2644 = torch.prim.ListConstruct %240, %int3200_2834 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2645 = torch.aten.view %2630, %2644 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2645, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2646 = torch.aten.mm %2645, %2643 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2646, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_2835 = torch.constant.int 1 | |
%int3200_2836 = torch.constant.int 3200 | |
%2647 = torch.prim.ListConstruct %int1_2835, %240, %int3200_2836 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2648 = torch.aten.view %2646, %2647 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2648, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_2837 = torch.constant.int 1 | |
%int32_2838 = torch.constant.int 32 | |
%int100_2839 = torch.constant.int 100 | |
%2649 = torch.prim.ListConstruct %int1_2837, %240, %int32_2838, %int100_2839 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2650 = torch.aten.view %2636, %2649 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2650, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_2840 = torch.constant.int 1 | |
%int32_2841 = torch.constant.int 32 | |
%int100_2842 = torch.constant.int 100 | |
%2651 = torch.prim.ListConstruct %int1_2840, %240, %int32_2841, %int100_2842 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2652 = torch.aten.view %2642, %2651 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2652, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_2843 = torch.constant.int 1 | |
%int32_2844 = torch.constant.int 32 | |
%int100_2845 = torch.constant.int 100 | |
%2653 = torch.prim.ListConstruct %int1_2843, %240, %int32_2844, %int100_2845 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2654 = torch.aten.view %2648, %2653 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2654, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
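// Q/K/V are now split into 32 heads of dim 100 (32 * 100 = 3200). The block
// below builds the rotary-embedding (RoPE) frequency table: positions
// 0..2047 times 50 inverse frequencies 10000^(-2i/100), combined as
// cos + i*sin into a [2048, 50] complex table. Sketch of the same table
// (illustrative names):
//
//   import torch
//   t = torch.arange(2048, dtype=torch.float32).view(2048, 1)  # positions
//   inv = 1.0 / (10000.0 ** (torch.arange(0, 100, 2).float() / 100))  # [50]
//   freqs = t * inv                                            # [2048, 50]
//   table = torch.complex(torch.cos(freqs), torch.sin(freqs))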
%int2048_2846 = torch.constant.int 2048 | |
%none_2847 = torch.constant.none | |
%none_2848 = torch.constant.none | |
%cpu_2849 = torch.constant.device "cpu" | |
%false_2850 = torch.constant.bool false | |
%2655 = torch.aten.arange %int2048_2846, %none_2847, %none_2848, %cpu_2849, %false_2850 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_2851 = torch.constant.int 0 | |
%int100_2852 = torch.constant.int 100 | |
%int2_2853 = torch.constant.int 2 | |
%none_2854 = torch.constant.none | |
%none_2855 = torch.constant.none | |
%cpu_2856 = torch.constant.device "cpu" | |
%false_2857 = torch.constant.bool false | |
%2656 = torch.aten.arange.start_step %int0_2851, %int100_2852, %int2_2853, %none_2854, %none_2855, %cpu_2856, %false_2857 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_2858 = torch.constant.int 0 | |
%int0_2859 = torch.constant.int 0 | |
%int50_2860 = torch.constant.int 50 | |
%int1_2861 = torch.constant.int 1 | |
%2657 = torch.aten.slice.Tensor %2656, %int0_2858, %int0_2859, %int50_2860, %int1_2861 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_2862 = torch.constant.int 6 | |
%2658 = torch.prims.convert_element_type %2657, %int6_2862 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_2863 = torch.constant.int 100 | |
%2659 = torch.aten.div.Scalar %2658, %int100_2863 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_2864 = torch.constant.float 1.000000e+04 | |
%2660 = torch.aten.pow.Scalar %float1.000000e04_2864, %2659 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%2661 = torch.aten.reciprocal %2660 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_2865 = torch.constant.float 1.000000e+00 | |
%2662 = torch.aten.mul.Scalar %2661, %float1.000000e00_2865 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_2866 = torch.constant.int 2048 | |
%int1_2867 = torch.constant.int 1 | |
%2663 = torch.prim.ListConstruct %int2048_2866, %int1_2867 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2664 = torch.aten.view %2655, %2663 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%2665 = torch.aten.mul.Tensor %2664, %2662 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2666 = torch.aten.cos %2665 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2667 = torch.aten.sin %2665 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2668 = torch.aten.complex %2666, %2667 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_2868 = torch.constant.int 1 | |
%2669 = torch.aten.size.int %2636, %int1_2868 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_2869 = torch.constant.int 0 | |
%2670 = torch.aten.add.int %int0_2869, %2669 : !torch.int, !torch.int -> !torch.int | |
%int0_2870 = torch.constant.int 0 | |
%int0_2871 = torch.constant.int 0 | |
%int1_2872 = torch.constant.int 1 | |
%2671 = torch.aten.slice.Tensor %2668, %int0_2870, %int0_2871, %2670, %int1_2872 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2671, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_2873 = torch.constant.int 1 | |
%int0_2874 = torch.constant.int 0 | |
%int9223372036854775807_2875 = torch.constant.int 9223372036854775807 | |
%int1_2876 = torch.constant.int 1 | |
%2672 = torch.aten.slice.Tensor %2671, %int1_2873, %int0_2874, %int9223372036854775807_2875, %int1_2876 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2672, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_2877 = torch.constant.int 0 | |
%2673 = torch.aten.unsqueeze %2672, %int0_2877 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %2673, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_2878 = torch.constant.int 2 | |
%2674 = torch.aten.unsqueeze %2673, %int2_2878 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2674, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_2879 = torch.constant.int 3 | |
%int0_2880 = torch.constant.int 0 | |
%int9223372036854775807_2881 = torch.constant.int 9223372036854775807 | |
%int1_2882 = torch.constant.int 1 | |
%2675 = torch.aten.slice.Tensor %2674, %int3_2879, %int0_2880, %int9223372036854775807_2881, %int1_2882 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2675, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
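// To apply RoPE, the IR bitcasts the f16 head dim [..., 100] to 50
// complex<f16> pairs (flow.tensor.bitcast), multiplies by the sliced,
// broadcast complex table, and bitcasts back to 100 real values. Roughly,
// in PyTorch (f32 used here for simplicity; the IR works on f16/complex<f16>):
//
//   import torch
//   def rope(x, table, s):                     # x: [1, s, 32, 100] float32
//       xc = torch.view_as_complex(x.reshape(1, s, 32, 50, 2))
//       xc = xc * table[:s].view(1, s, 1, 50)  # rotate each (re, im) pair
//       return torch.view_as_real(xc).reshape(1, s, 32, 100)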
%2676 = torch_c.to_builtin_tensor %2650 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_2883 = arith.constant 1 : index | |
%dim_2884 = tensor.dim %2676, %c1_2883 : tensor<1x?x32x100xf16> | |
%2677 = flow.tensor.bitcast %2676 : tensor<1x?x32x100xf16>{%dim_2884} -> tensor<1x?x32x50xcomplex<f16>>{%dim_2884} | |
%2678 = torch_c.from_builtin_tensor %2677 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %2678, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%2679 = torch.aten.mul.Tensor %2678, %2675 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %2679, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%2680 = torch_c.to_builtin_tensor %2679 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_2885 = arith.constant 1 : index | |
%dim_2886 = tensor.dim %2680, %c1_2885 : tensor<1x?x32x50xcomplex<f32>> | |
%2681 = flow.tensor.bitcast %2680 : tensor<1x?x32x50xcomplex<f32>>{%dim_2886} -> tensor<1x?x32x100xf32>{%dim_2886} | |
%2682 = torch_c.from_builtin_tensor %2681 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %2682, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_2887 = torch.constant.int 5 | |
%2683 = torch.prims.convert_element_type %2682, %int5_2887 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2683, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
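// %2683 is the rotated Q. The table construction and rotation are then
// repeated verbatim for K (%2684 through %2712 below); only the input differs.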
%int2048_2888 = torch.constant.int 2048 | |
%none_2889 = torch.constant.none | |
%none_2890 = torch.constant.none | |
%cpu_2891 = torch.constant.device "cpu" | |
%false_2892 = torch.constant.bool false | |
%2684 = torch.aten.arange %int2048_2888, %none_2889, %none_2890, %cpu_2891, %false_2892 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_2893 = torch.constant.int 0 | |
%int100_2894 = torch.constant.int 100 | |
%int2_2895 = torch.constant.int 2 | |
%none_2896 = torch.constant.none | |
%none_2897 = torch.constant.none | |
%cpu_2898 = torch.constant.device "cpu" | |
%false_2899 = torch.constant.bool false | |
%2685 = torch.aten.arange.start_step %int0_2893, %int100_2894, %int2_2895, %none_2896, %none_2897, %cpu_2898, %false_2899 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_2900 = torch.constant.int 0 | |
%int0_2901 = torch.constant.int 0 | |
%int50_2902 = torch.constant.int 50 | |
%int1_2903 = torch.constant.int 1 | |
%2686 = torch.aten.slice.Tensor %2685, %int0_2900, %int0_2901, %int50_2902, %int1_2903 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_2904 = torch.constant.int 6 | |
%2687 = torch.prims.convert_element_type %2686, %int6_2904 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_2905 = torch.constant.int 100 | |
%2688 = torch.aten.div.Scalar %2687, %int100_2905 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_2906 = torch.constant.float 1.000000e+04 | |
%2689 = torch.aten.pow.Scalar %float1.000000e04_2906, %2688 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%2690 = torch.aten.reciprocal %2689 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_2907 = torch.constant.float 1.000000e+00 | |
%2691 = torch.aten.mul.Scalar %2690, %float1.000000e00_2907 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_2908 = torch.constant.int 2048 | |
%int1_2909 = torch.constant.int 1 | |
%2692 = torch.prim.ListConstruct %int2048_2908, %int1_2909 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2693 = torch.aten.view %2684, %2692 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%2694 = torch.aten.mul.Tensor %2693, %2691 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2695 = torch.aten.cos %2694 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2696 = torch.aten.sin %2694 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%2697 = torch.aten.complex %2695, %2696 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_2910 = torch.constant.int 1 | |
%2698 = torch.aten.size.int %2642, %int1_2910 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_2911 = torch.constant.int 0 | |
%2699 = torch.aten.add.int %int0_2911, %2698 : !torch.int, !torch.int -> !torch.int | |
%int0_2912 = torch.constant.int 0 | |
%int0_2913 = torch.constant.int 0 | |
%int1_2914 = torch.constant.int 1 | |
%2700 = torch.aten.slice.Tensor %2697, %int0_2912, %int0_2913, %2699, %int1_2914 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2700, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_2915 = torch.constant.int 1 | |
%int0_2916 = torch.constant.int 0 | |
%int9223372036854775807_2917 = torch.constant.int 9223372036854775807 | |
%int1_2918 = torch.constant.int 1 | |
%2701 = torch.aten.slice.Tensor %2700, %int1_2915, %int0_2916, %int9223372036854775807_2917, %int1_2918 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %2701, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_2919 = torch.constant.int 0 | |
%2702 = torch.aten.unsqueeze %2701, %int0_2919 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %2702, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_2920 = torch.constant.int 2 | |
%2703 = torch.aten.unsqueeze %2702, %int2_2920 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2703, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_2921 = torch.constant.int 3 | |
%int0_2922 = torch.constant.int 0 | |
%int9223372036854775807_2923 = torch.constant.int 9223372036854775807 | |
%int1_2924 = torch.constant.int 1 | |
%2704 = torch.aten.slice.Tensor %2703, %int3_2921, %int0_2922, %int9223372036854775807_2923, %int1_2924 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %2704, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%2705 = torch_c.to_builtin_tensor %2652 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_2925 = arith.constant 1 : index | |
%dim_2926 = tensor.dim %2705, %c1_2925 : tensor<1x?x32x100xf16> | |
%2706 = flow.tensor.bitcast %2705 : tensor<1x?x32x100xf16>{%dim_2926} -> tensor<1x?x32x50xcomplex<f16>>{%dim_2926} | |
%2707 = torch_c.from_builtin_tensor %2706 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %2707, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%2708 = torch.aten.mul.Tensor %2707, %2704 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %2708, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%2709 = torch_c.to_builtin_tensor %2708 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_2927 = arith.constant 1 : index | |
%dim_2928 = tensor.dim %2709, %c1_2927 : tensor<1x?x32x50xcomplex<f32>> | |
%2710 = flow.tensor.bitcast %2709 : tensor<1x?x32x50xcomplex<f32>>{%dim_2928} -> tensor<1x?x32x100xf32>{%dim_2928} | |
%2711 = torch_c.from_builtin_tensor %2710 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %2711, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_2929 = torch.constant.int 5 | |
%2712 = torch.prims.convert_element_type %2711, %int5_2929 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2712, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int52_2930 = torch.constant.int 52 | |
%2713 = torch.aten.mul.Scalar %arg2, %int52_2930 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %2713, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int24 = torch.constant.int 24 | |
%int1_2931 = torch.constant.int 1 | |
%2714 = torch.aten.add.Scalar %2713, %int24, %int1_2931 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %2714, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
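// KV-cache addressing. %arg2 holds page indices; the cache (%2547, viewed
// below as [pages, 26, 2, 16, 32, 100], and 26*2*16*32*100 = 2662400 matches
// the flat [?,2662400] view) stores, per page, 26 layers x {K,V} x 16 tokens
// x 32 heads x 100 dims. Flattened to [pages*52, 16, 32, 100], slot
// page*52 + 2*layer selects K and +1 selects V, so the offset 24 above
// appears to place this block at layer 12. Sketch of the scatter that
// follows (illustrative names):
//
//   import torch
//   flat = cache.view(-1, 16, 32, 100)   # index: page*52 + 2*layer + kv
//   idx_k = page_ids * 52 + 24           # this layer's K slots
//   idx_v = idx_k + 1                    # this layer's V slots
//   flat[torch.cat([idx_k, idx_v])] = torch.cat([k_pages, v_pages])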
%int1_2932 = torch.constant.int 1 | |
%int16_2933 = torch.constant.int 16 | |
%int32_2934 = torch.constant.int 32 | |
%int100_2935 = torch.constant.int 100 | |
%2715 = torch.prim.ListConstruct %int1_2932, %368, %int16_2933, %int32_2934, %int100_2935 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2716 = torch.aten.view %2712, %2715 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %2716, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_2936 = torch.constant.int 16 | |
%int32_2937 = torch.constant.int 32 | |
%int100_2938 = torch.constant.int 100 | |
%2717 = torch.prim.ListConstruct %368, %int16_2936, %int32_2937, %int100_2938 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2718 = torch.aten.view %2716, %2717 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2718, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%2719 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%2720 = torch.aten.view %2714, %2719 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %2720, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_2939 = torch.constant.int 1 | |
%int16_2940 = torch.constant.int 16 | |
%int32_2941 = torch.constant.int 32 | |
%int100_2942 = torch.constant.int 100 | |
%2721 = torch.prim.ListConstruct %int1_2939, %368, %int16_2940, %int32_2941, %int100_2942 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2722 = torch.aten.view %2654, %2721 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %2722, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_2943 = torch.constant.int 16 | |
%int32_2944 = torch.constant.int 32 | |
%int100_2945 = torch.constant.int 100 | |
%2723 = torch.prim.ListConstruct %368, %int16_2943, %int32_2944, %int100_2945 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2724 = torch.aten.view %2722, %2723 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2724, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_2946 = torch.constant.int 1 | |
%int1_2947 = torch.constant.int 1 | |
%2725 = torch.aten.add.Scalar %2714, %int1_2946, %int1_2947 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %2725, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%2726 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%2727 = torch.aten.view %2725, %2726 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %2727, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%2728 = torch.prim.ListConstruct %2720, %2727 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_2948 = torch.constant.int 0 | |
%2729 = torch.aten.cat %2728, %int0_2948 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %2729, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%2730 = torch.prim.ListConstruct %2718, %2724 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_2949 = torch.constant.int 0 | |
%2731 = torch.aten.cat %2730, %int0_2949 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2731, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_2950 = torch.constant.int 26 | |
%int2_2951 = torch.constant.int 2 | |
%int16_2952 = torch.constant.int 16 | |
%int32_2953 = torch.constant.int 32 | |
%int100_2954 = torch.constant.int 100 | |
%2732 = torch.prim.ListConstruct %359, %int26_2950, %int2_2951, %int16_2952, %int32_2953, %int100_2954 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2733 = torch.aten.view %2547, %2732 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %2733, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_2955 = torch.constant.int 26 | |
%2734 = torch.aten.mul.int %359, %int26_2955 : !torch.int, !torch.int -> !torch.int | |
%int2_2956 = torch.constant.int 2 | |
%2735 = torch.aten.mul.int %2734, %int2_2956 : !torch.int, !torch.int -> !torch.int | |
%int16_2957 = torch.constant.int 16 | |
%int32_2958 = torch.constant.int 32 | |
%int100_2959 = torch.constant.int 100 | |
%2736 = torch.prim.ListConstruct %2735, %int16_2957, %int32_2958, %int100_2959 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2737 = torch.aten.view %2733, %2736 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2737, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%2738 = torch.prim.ListConstruct %2729 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_2960 = torch.constant.bool false | |
%2739 = torch.aten.index_put %2737, %2738, %2731, %false_2960 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %2739, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_2961 = torch.constant.int 26 | |
%int2_2962 = torch.constant.int 2 | |
%int16_2963 = torch.constant.int 16 | |
%int32_2964 = torch.constant.int 32 | |
%int100_2965 = torch.constant.int 100 | |
%2740 = torch.prim.ListConstruct %359, %int26_2961, %int2_2962, %int16_2963, %int32_2964, %int100_2965 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2741 = torch.aten.view %2739, %2740 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %2741, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_2966 = torch.constant.int 2662400 | |
%2742 = torch.prim.ListConstruct %359, %int2662400_2966 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2743 = torch.aten.view %2741, %2742 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %2743, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
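// With the cache updated, attention proper begins: Q, K, V are moved to
// head-major layout [1, 32, seq, 100], scores are Q @ K^T scaled by
// 1/sqrt(100) = 1/10 plus the attention mask (%266), softmax runs in f32,
// and the probabilities are applied to V. Condensed PyTorch sketch
// (illustrative names):
//
//   import torch
//   scores = q @ k.transpose(-2, -1) / 10.0 + mask    # [1, 32, s, s]
//   probs = torch.softmax(scores.float(), dim=-1).to(torch.float16)
//   out = probs @ v                                   # [1, 32, s, 100]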
%int1_2967 = torch.constant.int 1 | |
%int2_2968 = torch.constant.int 2 | |
%2744 = torch.aten.transpose.int %2683, %int1_2967, %int2_2968 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2744, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2969 = torch.constant.int 1 | |
%int2_2970 = torch.constant.int 2 | |
%2745 = torch.aten.transpose.int %2712, %int1_2969, %int2_2970 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2745, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_2971 = torch.constant.int 1 | |
%int2_2972 = torch.constant.int 2 | |
%2746 = torch.aten.transpose.int %2654, %int1_2971, %int2_2972 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2746, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_2973 = torch.constant.int 2 | |
%int3_2974 = torch.constant.int 3 | |
%2747 = torch.aten.transpose.int %2745, %int2_2973, %int3_2974 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %2747, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_2975 = torch.constant.int 1 | |
%int32_2976 = torch.constant.int 32 | |
%int100_2977 = torch.constant.int 100 | |
%2748 = torch.prim.ListConstruct %int1_2975, %int32_2976, %2669, %int100_2977 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2978 = torch.constant.bool false | |
%2749 = torch.aten.expand %2744, %2748, %false_2978 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2749, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_2979 = torch.constant.int 32 | |
%int100_2980 = torch.constant.int 100 | |
%2750 = torch.prim.ListConstruct %int32_2979, %2669, %int100_2980 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2751 = torch.aten.view %2749, %2750 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %2751, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_2981 = torch.constant.int 1 | |
%int32_2982 = torch.constant.int 32 | |
%int100_2983 = torch.constant.int 100 | |
%2752 = torch.prim.ListConstruct %int1_2981, %int32_2982, %int100_2983, %2698 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2984 = torch.constant.bool false | |
%2753 = torch.aten.expand %2747, %2752, %false_2984 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %2753, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_2985 = torch.constant.int 32 | |
%int100_2986 = torch.constant.int 100 | |
%2754 = torch.prim.ListConstruct %int32_2985, %int100_2986, %2698 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2755 = torch.aten.view %2753, %2754 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %2755, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%2756 = torch.aten.bmm %2751, %2755 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %2756, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_2987 = torch.constant.int 1 | |
%int32_2988 = torch.constant.int 32 | |
%2757 = torch.prim.ListConstruct %int1_2987, %int32_2988, %2669, %2698 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2758 = torch.aten.view %2756, %2757 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2758, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%float1.000000e01_2989 = torch.constant.float 1.000000e+01 | |
%2759 = torch.aten.div.Scalar %2758, %float1.000000e01_2989 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2759, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_2990 = torch.constant.int 1 | |
%2760 = torch.aten.add.Tensor %2759, %266, %int1_2990 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2760, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_2991 = torch.constant.int 6 | |
%2761 = torch.prims.convert_element_type %2760, %int6_2991 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %2761, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_2992 = torch.constant.int -1 | |
%false_2993 = torch.constant.bool false | |
%2762 = torch.aten._softmax %2761, %int-1_2992, %false_2993 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %2762, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_2994 = torch.constant.int 5 | |
%2763 = torch.prims.convert_element_type %2762, %int5_2994 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2763, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_2995 = torch.constant.int 1 | |
%int32_2996 = torch.constant.int 32 | |
%2764 = torch.prim.ListConstruct %int1_2995, %int32_2996, %2669, %2698 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_2997 = torch.constant.bool false | |
%2765 = torch.aten.expand %2763, %2764, %false_2997 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %2765, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_2998 = torch.constant.int 32 | |
%2766 = torch.prim.ListConstruct %int32_2998, %2669, %2698 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2767 = torch.aten.view %2765, %2766 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %2767, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_2999 = torch.constant.int 1 | |
%2768 = torch.aten.size.int %2648, %int1_2999 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_3000 = torch.constant.int 1 | |
%int32_3001 = torch.constant.int 32 | |
%int100_3002 = torch.constant.int 100 | |
%2769 = torch.prim.ListConstruct %int1_3000, %int32_3001, %2768, %int100_3002 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3003 = torch.constant.bool false | |
%2770 = torch.aten.expand %2746, %2769, %false_3003 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2770, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_3004 = torch.constant.int 32 | |
%int100_3005 = torch.constant.int 100 | |
%2771 = torch.prim.ListConstruct %int32_3004, %2768, %int100_3005 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2772 = torch.aten.view %2770, %2771 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %2772, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%2773 = torch.aten.bmm %2767, %2772 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %2773, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_3006 = torch.constant.int 1 | |
%int32_3007 = torch.constant.int 32 | |
%int100_3008 = torch.constant.int 100 | |
%2774 = torch.prim.ListConstruct %int1_3006, %int32_3007, %2669, %int100_3008 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2775 = torch.aten.view %2773, %2774 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %2775, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_3009 = torch.constant.int 1 | |
%int2_3010 = torch.constant.int 2 | |
%2776 = torch.aten.transpose.int %2775, %int1_3009, %int2_3010 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2776, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_3011 = torch.constant.int 0 | |
%2777 = torch.aten.clone %2776, %int0_3011 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2777, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_3012 = torch.constant.int 1 | |
%int3200_3013 = torch.constant.int 3200 | |
%2778 = torch.prim.ListConstruct %int1_3012, %2669, %int3200_3013 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2779 = torch.aten._unsafe_view %2777, %2778 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2779, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
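// The attended heads are merged back to [1, seq, 3200] (%2779) and passed
// through the attention output projection (%113), then added to the
// residual. Continuing the sketch above (illustrative names):
//
//   import torch.nn.functional as F
//   attn_out = F.linear(out.transpose(1, 2).reshape(1, s, 3200), Wo)
//   h = h + attn_out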
%int-2_3014 = torch.constant.int -2 | |
%int-1_3015 = torch.constant.int -1 | |
%2780 = torch.aten.transpose.int %113, %int-2_3014, %int-1_3015 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3016 = torch.constant.int 3200 | |
%2781 = torch.prim.ListConstruct %2669, %int3200_3016 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2782 = torch.aten.view %2779, %2781 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2782, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2783 = torch.aten.mm %2782, %2780 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2783, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3017 = torch.constant.int 1 | |
%int3200_3018 = torch.constant.int 3200 | |
%2784 = torch.prim.ListConstruct %int1_3017, %2669, %int3200_3018 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2785 = torch.aten.view %2783, %2784 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2785, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_3019 = torch.constant.int 1 | |
%2786 = torch.aten.add.Tensor %2621, %2785, %int1_3019 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2786, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
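// %2786 is the post-attention residual. The RMSNorm pattern sketched earlier
// repeats here with the FFN norm weight (%114), producing %2795.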
%int6_3020 = torch.constant.int 6 | |
%2787 = torch.prims.convert_element_type %2786, %int6_3020 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2787, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_3021 = torch.constant.int 2 | |
%2788 = torch.aten.pow.Tensor_Scalar %2787, %int2_3021 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2788, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_3022 = torch.constant.int -1 | |
%2789 = torch.prim.ListConstruct %int-1_3022 : (!torch.int) -> !torch.list<int> | |
%true_3023 = torch.constant.bool true | |
%none_3024 = torch.constant.none | |
%2790 = torch.aten.mean.dim %2788, %2789, %true_3023, %none_3024 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2790, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_3025 = torch.constant.float 9.9999999747524271E-7 | |
%int1_3026 = torch.constant.int 1 | |
%2791 = torch.aten.add.Scalar %2790, %float9.999990e-07_3025, %int1_3026 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2791, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2792 = torch.aten.rsqrt %2791 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2792, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2793 = torch.aten.mul.Tensor %2787, %2792 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2793, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%2794 = torch.aten.mul.Tensor %114, %2793 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2794, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_3027 = torch.constant.int 5 | |
%2795 = torch.prims.convert_element_type %2794, %int5_3027 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2795, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
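// The feed-forward block is a SwiGLU: gate (%115) and up (%116) projections
// to 8640, silu on the gate, elementwise product, then the down projection
// (%117) back to 3200 and a residual add. Condensed sketch (illustrative
// names):
//
//   import torch.nn.functional as F
//   ffn = F.linear(F.silu(F.linear(h, W_gate)) * F.linear(h, W_up), W_down)
//   h = h + ffn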
%int-2_3028 = torch.constant.int -2 | |
%int-1_3029 = torch.constant.int -1 | |
%2796 = torch.aten.transpose.int %115, %int-2_3028, %int-1_3029 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_3030 = torch.constant.int 3200 | |
%2797 = torch.prim.ListConstruct %240, %int3200_3030 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2798 = torch.aten.view %2795, %2797 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2798, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2799 = torch.aten.mm %2798, %2796 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2799, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_3031 = torch.constant.int 1 | |
%int8640_3032 = torch.constant.int 8640 | |
%2800 = torch.prim.ListConstruct %int1_3031, %240, %int8640_3032 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2801 = torch.aten.view %2799, %2800 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2801, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%2802 = torch.aten.silu %2801 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2802, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_3033 = torch.constant.int -2 | |
%int-1_3034 = torch.constant.int -1 | |
%2803 = torch.aten.transpose.int %116, %int-2_3033, %int-1_3034 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_3035 = torch.constant.int 3200 | |
%2804 = torch.prim.ListConstruct %240, %int3200_3035 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2805 = torch.aten.view %2795, %2804 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2805, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2806 = torch.aten.mm %2805, %2803 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2806, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_3036 = torch.constant.int 1 | |
%int8640_3037 = torch.constant.int 8640 | |
%2807 = torch.prim.ListConstruct %int1_3036, %240, %int8640_3037 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2808 = torch.aten.view %2806, %2807 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2808, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%2809 = torch.aten.mul.Tensor %2802, %2808 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %2809, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_3038 = torch.constant.int -2 | |
%int-1_3039 = torch.constant.int -1 | |
%2810 = torch.aten.transpose.int %117, %int-2_3038, %int-1_3039 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_3040 = torch.constant.int 1 | |
%2811 = torch.aten.size.int %2801, %int1_3040 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_3041 = torch.constant.int 8640 | |
%2812 = torch.prim.ListConstruct %2811, %int8640_3041 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2813 = torch.aten.view %2809, %2812 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %2813, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%2814 = torch.aten.mm %2813, %2810 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2814, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3042 = torch.constant.int 1 | |
%int3200_3043 = torch.constant.int 3200 | |
%2815 = torch.prim.ListConstruct %int1_3042, %2811, %int3200_3043 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2816 = torch.aten.view %2814, %2815 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2816, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_3044 = torch.constant.int 1 | |
%2817 = torch.aten.add.Tensor %2786, %2816, %int1_3044 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2817, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
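// %2817 closes this transformer block (attention + SwiGLU FFN, each with a
// pre-RMSNorm and a residual add). The IR below repeats the identical
// pattern for the next layer, starting from its attn_norm weight (%118).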
%int6_3045 = torch.constant.int 6 | |
%2818 = torch.prims.convert_element_type %2817, %int6_3045 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2818, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_3046 = torch.constant.int 2 | |
%2819 = torch.aten.pow.Tensor_Scalar %2818, %int2_3046 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2819, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_3047 = torch.constant.int -1 | |
%2820 = torch.prim.ListConstruct %int-1_3047 : (!torch.int) -> !torch.list<int> | |
%true_3048 = torch.constant.bool true | |
%none_3049 = torch.constant.none | |
%2821 = torch.aten.mean.dim %2819, %2820, %true_3048, %none_3049 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2821, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_3050 = torch.constant.float 9.9999999747524271E-7 | |
%int1_3051 = torch.constant.int 1 | |
%2822 = torch.aten.add.Scalar %2821, %float9.999990e-07_3050, %int1_3051 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2822, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2823 = torch.aten.rsqrt %2822 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %2823, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%2824 = torch.aten.mul.Tensor %2818, %2823 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2824, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%2825 = torch.aten.mul.Tensor %118, %2824 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %2825, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_3052 = torch.constant.int 5 | |
%2826 = torch.prims.convert_element_type %2825, %int5_3052 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2826, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_3053 = torch.constant.int -2 | |
%int-1_3054 = torch.constant.int -1 | |
%2827 = torch.aten.transpose.int %119, %int-2_3053, %int-1_3054 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3055 = torch.constant.int 3200 | |
%2828 = torch.prim.ListConstruct %240, %int3200_3055 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2829 = torch.aten.view %2826, %2828 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2829, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2830 = torch.aten.mm %2829, %2827 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2830, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3056 = torch.constant.int 1 | |
%int3200_3057 = torch.constant.int 3200 | |
%2831 = torch.prim.ListConstruct %int1_3056, %240, %int3200_3057 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2832 = torch.aten.view %2830, %2831 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2832, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_3058 = torch.constant.int -2 | |
%int-1_3059 = torch.constant.int -1 | |
%2833 = torch.aten.transpose.int %120, %int-2_3058, %int-1_3059 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3060 = torch.constant.int 3200 | |
%2834 = torch.prim.ListConstruct %240, %int3200_3060 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2835 = torch.aten.view %2826, %2834 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2835, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2836 = torch.aten.mm %2835, %2833 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2836, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3061 = torch.constant.int 1 | |
%int3200_3062 = torch.constant.int 3200 | |
%2837 = torch.prim.ListConstruct %int1_3061, %240, %int3200_3062 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2838 = torch.aten.view %2836, %2837 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2838, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_3063 = torch.constant.int -2 | |
%int-1_3064 = torch.constant.int -1 | |
%2839 = torch.aten.transpose.int %121, %int-2_3063, %int-1_3064 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3065 = torch.constant.int 3200 | |
%2840 = torch.prim.ListConstruct %240, %int3200_3065 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2841 = torch.aten.view %2826, %2840 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2841, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%2842 = torch.aten.mm %2841, %2839 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %2842, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3066 = torch.constant.int 1 | |
%int3200_3067 = torch.constant.int 3200 | |
%2843 = torch.prim.ListConstruct %int1_3066, %240, %int3200_3067 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2844 = torch.aten.view %2842, %2843 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %2844, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_3068 = torch.constant.int 1 | |
%int32_3069 = torch.constant.int 32 | |
%int100_3070 = torch.constant.int 100 | |
%2845 = torch.prim.ListConstruct %int1_3068, %240, %int32_3069, %int100_3070 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2846 = torch.aten.view %2832, %2845 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2846, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_3071 = torch.constant.int 1 | |
%int32_3072 = torch.constant.int 32 | |
%int100_3073 = torch.constant.int 100 | |
%2847 = torch.prim.ListConstruct %int1_3071, %240, %int32_3072, %int100_3073 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2848 = torch.aten.view %2838, %2847 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2848, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_3074 = torch.constant.int 1 | |
%int32_3075 = torch.constant.int 32 | |
%int100_3076 = torch.constant.int 100 | |
%2849 = torch.prim.ListConstruct %int1_3074, %240, %int32_3075, %int100_3076 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2850 = torch.aten.view %2844, %2849 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %2850, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int2048_3077 = torch.constant.int 2048
%none_3078 = torch.constant.none
%none_3079 = torch.constant.none
%cpu_3080 = torch.constant.device "cpu"
%false_3081 = torch.constant.bool false
%2851 = torch.aten.arange %int2048_3077, %none_3078, %none_3079, %cpu_3080, %false_3081 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64>
%int0_3082 = torch.constant.int 0
%int100_3083 = torch.constant.int 100
%int2_3084 = torch.constant.int 2
%none_3085 = torch.constant.none
%none_3086 = torch.constant.none
%cpu_3087 = torch.constant.device "cpu"
%false_3088 = torch.constant.bool false
%2852 = torch.aten.arange.start_step %int0_3082, %int100_3083, %int2_3084, %none_3085, %none_3086, %cpu_3087, %false_3088 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64>
%int0_3089 = torch.constant.int 0
%int0_3090 = torch.constant.int 0
%int50_3091 = torch.constant.int 50
%int1_3092 = torch.constant.int 1
%2853 = torch.aten.slice.Tensor %2852, %int0_3089, %int0_3090, %int50_3091, %int1_3092 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64>
%int6_3093 = torch.constant.int 6
%2854 = torch.prims.convert_element_type %2853, %int6_3093 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32>
%int100_3094 = torch.constant.int 100
%2855 = torch.aten.div.Scalar %2854, %int100_3094 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32>
%float1.000000e04_3095 = torch.constant.float 1.000000e+04
%2856 = torch.aten.pow.Scalar %float1.000000e04_3095, %2855 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%2857 = torch.aten.reciprocal %2856 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%float1.000000e00_3096 = torch.constant.float 1.000000e+00
%2858 = torch.aten.mul.Scalar %2857, %float1.000000e00_3096 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32>
%int2048_3097 = torch.constant.int 2048
%int1_3098 = torch.constant.int 1
%2859 = torch.prim.ListConstruct %int2048_3097, %int1_3098 : (!torch.int, !torch.int) -> !torch.list<int>
%2860 = torch.aten.view %2851, %2859 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64>
%2861 = torch.aten.mul.Tensor %2860, %2858 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32>
%2862 = torch.aten.cos %2861 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%2863 = torch.aten.sin %2861 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%2864 = torch.aten.complex %2862, %2863 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>>
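// Slice the table to the current sequence length (s0 * 16 tokens) and unsqueeze it to
// [1, seq, 1, 50] so it broadcasts across the 32 heads.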
%int1_3099 = torch.constant.int 1
%2865 = torch.aten.size.int %2832, %int1_3099 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int0_3100 = torch.constant.int 0
%2866 = torch.aten.add.int %int0_3100, %2865 : !torch.int, !torch.int -> !torch.int
%int0_3101 = torch.constant.int 0
%int0_3102 = torch.constant.int 0
%int1_3103 = torch.constant.int 1
%2867 = torch.aten.slice.Tensor %2864, %int0_3101, %int0_3102, %2866, %int1_3103 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %2867, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int1_3104 = torch.constant.int 1
%int0_3105 = torch.constant.int 0
%int9223372036854775807_3106 = torch.constant.int 9223372036854775807
%int1_3107 = torch.constant.int 1
%2868 = torch.aten.slice.Tensor %2867, %int1_3104, %int0_3105, %int9223372036854775807_3106, %int1_3107 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %2868, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int0_3108 = torch.constant.int 0
%2869 = torch.aten.unsqueeze %2868, %int0_3108 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>>
torch.bind_symbolic_shape %2869, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>>
%int2_3109 = torch.constant.int 2
%2870 = torch.aten.unsqueeze %2869, %int2_3109 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %2870, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%int3_3110 = torch.constant.int 3
%int0_3111 = torch.constant.int 0
%int9223372036854775807_3112 = torch.constant.int 9223372036854775807
%int1_3113 = torch.constant.int 1
%2871 = torch.aten.slice.Tensor %2870, %int3_3110, %int0_3111, %int9223372036854775807_3112, %int1_3113 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %2871, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
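// Rotate the query heads: bitcast [1,seq,32,100] f16 to [1,seq,32,50] complex<f16>,
// complex-multiply by the rotation factors, bitcast the complex<f32> result back to
// f32 pairs, and truncate to f16.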
%2872 = torch_c.to_builtin_tensor %2846 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16>
%c1_3114 = arith.constant 1 : index
%dim_3115 = tensor.dim %2872, %c1_3114 : tensor<1x?x32x100xf16>
%2873 = flow.tensor.bitcast %2872 : tensor<1x?x32x100xf16>{%dim_3115} -> tensor<1x?x32x50xcomplex<f16>>{%dim_3115}
%2874 = torch_c.from_builtin_tensor %2873 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>>
torch.bind_symbolic_shape %2874, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>>
%2875 = torch.aten.mul.Tensor %2874, %2871 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>>
torch.bind_symbolic_shape %2875, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>>
%2876 = torch_c.to_builtin_tensor %2875 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>>
%c1_3116 = arith.constant 1 : index
%dim_3117 = tensor.dim %2876, %c1_3116 : tensor<1x?x32x50xcomplex<f32>>
%2877 = flow.tensor.bitcast %2876 : tensor<1x?x32x50xcomplex<f32>>{%dim_3117} -> tensor<1x?x32x100xf32>{%dim_3117}
%2878 = torch_c.from_builtin_tensor %2877 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32>
torch.bind_symbolic_shape %2878, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32>
%int5_3118 = torch.constant.int 5
%2879 = torch.prims.convert_element_type %2878, %int5_3118 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2879, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
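// The same rotation table is rebuilt from scratch and applied to the key heads
// (%2848 -> %2908) by the identical bitcast / complex-multiply sequence.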
%int2048_3119 = torch.constant.int 2048
%none_3120 = torch.constant.none
%none_3121 = torch.constant.none
%cpu_3122 = torch.constant.device "cpu"
%false_3123 = torch.constant.bool false
%2880 = torch.aten.arange %int2048_3119, %none_3120, %none_3121, %cpu_3122, %false_3123 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64>
%int0_3124 = torch.constant.int 0
%int100_3125 = torch.constant.int 100
%int2_3126 = torch.constant.int 2
%none_3127 = torch.constant.none
%none_3128 = torch.constant.none
%cpu_3129 = torch.constant.device "cpu"
%false_3130 = torch.constant.bool false
%2881 = torch.aten.arange.start_step %int0_3124, %int100_3125, %int2_3126, %none_3127, %none_3128, %cpu_3129, %false_3130 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64>
%int0_3131 = torch.constant.int 0
%int0_3132 = torch.constant.int 0
%int50_3133 = torch.constant.int 50
%int1_3134 = torch.constant.int 1
%2882 = torch.aten.slice.Tensor %2881, %int0_3131, %int0_3132, %int50_3133, %int1_3134 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64>
%int6_3135 = torch.constant.int 6
%2883 = torch.prims.convert_element_type %2882, %int6_3135 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32>
%int100_3136 = torch.constant.int 100
%2884 = torch.aten.div.Scalar %2883, %int100_3136 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32>
%float1.000000e04_3137 = torch.constant.float 1.000000e+04
%2885 = torch.aten.pow.Scalar %float1.000000e04_3137, %2884 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%2886 = torch.aten.reciprocal %2885 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32>
%float1.000000e00_3138 = torch.constant.float 1.000000e+00
%2887 = torch.aten.mul.Scalar %2886, %float1.000000e00_3138 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32>
%int2048_3139 = torch.constant.int 2048
%int1_3140 = torch.constant.int 1
%2888 = torch.prim.ListConstruct %int2048_3139, %int1_3140 : (!torch.int, !torch.int) -> !torch.list<int>
%2889 = torch.aten.view %2880, %2888 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64>
%2890 = torch.aten.mul.Tensor %2889, %2887 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32>
%2891 = torch.aten.cos %2890 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%2892 = torch.aten.sin %2890 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32>
%2893 = torch.aten.complex %2891, %2892 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>>
%int1_3141 = torch.constant.int 1
%2894 = torch.aten.size.int %2838, %int1_3141 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int0_3142 = torch.constant.int 0
%2895 = torch.aten.add.int %int0_3142, %2894 : !torch.int, !torch.int -> !torch.int
%int0_3143 = torch.constant.int 0
%int0_3144 = torch.constant.int 0
%int1_3145 = torch.constant.int 1
%2896 = torch.aten.slice.Tensor %2893, %int0_3143, %int0_3144, %2895, %int1_3145 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %2896, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int1_3146 = torch.constant.int 1
%int0_3147 = torch.constant.int 0
%int9223372036854775807_3148 = torch.constant.int 9223372036854775807
%int1_3149 = torch.constant.int 1
%2897 = torch.aten.slice.Tensor %2896, %int1_3146, %int0_3147, %int9223372036854775807_3148, %int1_3149 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>>
torch.bind_symbolic_shape %2897, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>>
%int0_3150 = torch.constant.int 0
%2898 = torch.aten.unsqueeze %2897, %int0_3150 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>>
torch.bind_symbolic_shape %2898, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>>
%int2_3151 = torch.constant.int 2
%2899 = torch.aten.unsqueeze %2898, %int2_3151 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %2899, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%int3_3152 = torch.constant.int 3
%int0_3153 = torch.constant.int 0
%int9223372036854775807_3154 = torch.constant.int 9223372036854775807
%int1_3155 = torch.constant.int 1
%2900 = torch.aten.slice.Tensor %2899, %int3_3152, %int0_3153, %int9223372036854775807_3154, %int1_3155 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>>
torch.bind_symbolic_shape %2900, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>>
%2901 = torch_c.to_builtin_tensor %2848 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16>
%c1_3156 = arith.constant 1 : index
%dim_3157 = tensor.dim %2901, %c1_3156 : tensor<1x?x32x100xf16>
%2902 = flow.tensor.bitcast %2901 : tensor<1x?x32x100xf16>{%dim_3157} -> tensor<1x?x32x50xcomplex<f16>>{%dim_3157}
%2903 = torch_c.from_builtin_tensor %2902 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>>
torch.bind_symbolic_shape %2903, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>>
%2904 = torch.aten.mul.Tensor %2903, %2900 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>>
torch.bind_symbolic_shape %2904, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>>
%2905 = torch_c.to_builtin_tensor %2904 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>>
%c1_3158 = arith.constant 1 : index
%dim_3159 = tensor.dim %2905, %c1_3158 : tensor<1x?x32x50xcomplex<f32>>
%2906 = flow.tensor.bitcast %2905 : tensor<1x?x32x50xcomplex<f32>>{%dim_3159} -> tensor<1x?x32x100xf32>{%dim_3159}
%2907 = torch_c.from_builtin_tensor %2906 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32>
torch.bind_symbolic_shape %2907, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32>
%int5_3160 = torch.constant.int 5
%2908 = torch.prims.convert_element_type %2907, %int5_3160 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2908, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
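// KV-cache slot arithmetic: flat index = %arg2 * 52 + 26 (+1 below for the value slot).
// The stride of 52 matches the [?,26,2,16,32,100] cache view further down:
// 26 layers x 2 (K/V) entries per cache page, 16 tokens per page.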
%int52_3161 = torch.constant.int 52
%2909 = torch.aten.mul.Scalar %arg2, %int52_3161 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %2909, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
%int26_3162 = torch.constant.int 26
%int1_3163 = torch.constant.int 1
%2910 = torch.aten.add.Scalar %2909, %int26_3162, %int1_3163 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %2910, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
%int1_3164 = torch.constant.int 1
%int16_3165 = torch.constant.int 16
%int32_3166 = torch.constant.int 32
%int100_3167 = torch.constant.int 100
%2911 = torch.prim.ListConstruct %int1_3164, %368, %int16_3165, %int32_3166, %int100_3167 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2912 = torch.aten.view %2908, %2911 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16>
torch.bind_symbolic_shape %2912, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16>
%int16_3168 = torch.constant.int 16
%int32_3169 = torch.constant.int 32
%int100_3170 = torch.constant.int 100
%2913 = torch.prim.ListConstruct %368, %int16_3168, %int32_3169, %int100_3170 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2914 = torch.aten.view %2912, %2913 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2914, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%2915 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int>
%2916 = torch.aten.view %2910, %2915 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %2916, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64>
%int1_3171 = torch.constant.int 1
%int16_3172 = torch.constant.int 16
%int32_3173 = torch.constant.int 32
%int100_3174 = torch.constant.int 100
%2917 = torch.prim.ListConstruct %int1_3171, %368, %int16_3172, %int32_3173, %int100_3174 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2918 = torch.aten.view %2850, %2917 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16>
torch.bind_symbolic_shape %2918, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16>
%int16_3175 = torch.constant.int 16
%int32_3176 = torch.constant.int 32
%int100_3177 = torch.constant.int 100
%2919 = torch.prim.ListConstruct %368, %int16_3175, %int32_3176, %int100_3177 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2920 = torch.aten.view %2918, %2919 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2920, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%int1_3178 = torch.constant.int 1
%int1_3179 = torch.constant.int 1
%2921 = torch.aten.add.Scalar %2910, %int1_3178, %int1_3179 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64>
torch.bind_symbolic_shape %2921, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64>
%2922 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int>
%2923 = torch.aten.view %2921, %2922 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %2923, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64>
%2924 = torch.prim.ListConstruct %2916, %2923 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor>
%int0_3180 = torch.constant.int 0
%2925 = torch.aten.cat %2924, %int0_3180 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64>
torch.bind_symbolic_shape %2925, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64>
%2926 = torch.prim.ListConstruct %2914, %2920 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor>
%int0_3181 = torch.constant.int 0
%2927 = torch.aten.cat %2926, %int0_3181 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2927, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
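// Scatter the concatenated K/V pages into the flattened cache with index_put, then
// restore the [?,2662400] linearized cache view.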
%int26_3182 = torch.constant.int 26
%int2_3183 = torch.constant.int 2
%int16_3184 = torch.constant.int 16
%int32_3185 = torch.constant.int 32
%int100_3186 = torch.constant.int 100
%2928 = torch.prim.ListConstruct %359, %int26_3182, %int2_3183, %int16_3184, %int32_3185, %int100_3186 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2929 = torch.aten.view %2743, %2928 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16>
torch.bind_symbolic_shape %2929, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16>
%int26_3187 = torch.constant.int 26
%2930 = torch.aten.mul.int %359, %int26_3187 : !torch.int, !torch.int -> !torch.int
%int2_3188 = torch.constant.int 2
%2931 = torch.aten.mul.int %2930, %int2_3188 : !torch.int, !torch.int -> !torch.int
%int16_3189 = torch.constant.int 16
%int32_3190 = torch.constant.int 32
%int100_3191 = torch.constant.int 100
%2932 = torch.prim.ListConstruct %2931, %int16_3189, %int32_3190, %int100_3191 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2933 = torch.aten.view %2929, %2932 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2933, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%2934 = torch.prim.ListConstruct %2925 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>>
%false_3192 = torch.constant.bool false
%2935 = torch.aten.index_put %2933, %2934, %2927, %false_3192 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16>
torch.bind_symbolic_shape %2935, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16>
%int26_3193 = torch.constant.int 26
%int2_3194 = torch.constant.int 2
%int16_3195 = torch.constant.int 16
%int32_3196 = torch.constant.int 32
%int100_3197 = torch.constant.int 100
%2936 = torch.prim.ListConstruct %359, %int26_3193, %int2_3194, %int16_3195, %int32_3196, %int100_3197 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2937 = torch.aten.view %2935, %2936 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16>
torch.bind_symbolic_shape %2937, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16>
%int2662400_3198 = torch.constant.int 2662400
%2938 = torch.prim.ListConstruct %359, %int2662400_3198 : (!torch.int, !torch.int) -> !torch.list<int>
%2939 = torch.aten.view %2937, %2938 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16>
torch.bind_symbolic_shape %2939, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16>
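// Attention: move heads to the batch dimension ([1,seq,32,100] -> [1,32,seq,100]) and
// compute the score matrix Q @ K^T as a batched matmul over the 32 heads.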
%int1_3199 = torch.constant.int 1
%int2_3200 = torch.constant.int 2
%2940 = torch.aten.transpose.int %2879, %int1_3199, %int2_3200 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2940, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_3201 = torch.constant.int 1
%int2_3202 = torch.constant.int 2
%2941 = torch.aten.transpose.int %2908, %int1_3201, %int2_3202 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2941, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_3203 = torch.constant.int 1
%int2_3204 = torch.constant.int 2
%2942 = torch.aten.transpose.int %2850, %int1_3203, %int2_3204 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2942, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int2_3205 = torch.constant.int 2
%int3_3206 = torch.constant.int 3
%2943 = torch.aten.transpose.int %2941, %int2_3205, %int3_3206 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16>
torch.bind_symbolic_shape %2943, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16>
%int1_3207 = torch.constant.int 1
%int32_3208 = torch.constant.int 32
%int100_3209 = torch.constant.int 100
%2944 = torch.prim.ListConstruct %int1_3207, %int32_3208, %2865, %int100_3209 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_3210 = torch.constant.bool false
%2945 = torch.aten.expand %2940, %2944, %false_3210 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2945, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int32_3211 = torch.constant.int 32
%int100_3212 = torch.constant.int 100
%2946 = torch.prim.ListConstruct %int32_3211, %2865, %int100_3212 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2947 = torch.aten.view %2945, %2946 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %2947, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%int1_3213 = torch.constant.int 1
%int32_3214 = torch.constant.int 32
%int100_3215 = torch.constant.int 100
%2948 = torch.prim.ListConstruct %int1_3213, %int32_3214, %int100_3215, %2894 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_3216 = torch.constant.bool false
%2949 = torch.aten.expand %2943, %2948, %false_3216 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16>
torch.bind_symbolic_shape %2949, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16>
%int32_3217 = torch.constant.int 32
%int100_3218 = torch.constant.int 100
%2950 = torch.prim.ListConstruct %int32_3217, %int100_3218, %2894 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2951 = torch.aten.view %2949, %2950 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16>
torch.bind_symbolic_shape %2951, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16>
%2952 = torch.aten.bmm %2947, %2951 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16>
torch.bind_symbolic_shape %2952, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16>
%int1_3219 = torch.constant.int 1
%int32_3220 = torch.constant.int 32
%2953 = torch.prim.ListConstruct %int1_3219, %int32_3220, %2865, %2894 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2954 = torch.aten.view %2952, %2953 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2954, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
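// Scale the scores by 1/sqrt(head_dim) = 1/10, add the attention mask (%266), and run
// softmax in f32 for numerical stability before casting back to f16.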
%float1.000000e01_3221 = torch.constant.float 1.000000e+01
%2955 = torch.aten.div.Scalar %2954, %float1.000000e01_3221 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2955, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int1_3222 = torch.constant.int 1
%2956 = torch.aten.add.Tensor %2955, %266, %int1_3222 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2956, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int6_3223 = torch.constant.int 6
%2957 = torch.prims.convert_element_type %2956, %int6_3223 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %2957, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int-1_3224 = torch.constant.int -1
%false_3225 = torch.constant.bool false
%2958 = torch.aten._softmax %2957, %int-1_3224, %false_3225 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %2958, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int5_3226 = torch.constant.int 5
%2959 = torch.prims.convert_element_type %2958, %int5_3226 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2959, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int1_3227 = torch.constant.int 1
%int32_3228 = torch.constant.int 32
%2960 = torch.prim.ListConstruct %int1_3227, %int32_3228, %2865, %2894 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_3229 = torch.constant.bool false
%2961 = torch.aten.expand %2959, %2960, %false_3229 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %2961, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int32_3230 = torch.constant.int 32
%2962 = torch.prim.ListConstruct %int32_3230, %2865, %2894 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2963 = torch.aten.view %2961, %2962 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16>
torch.bind_symbolic_shape %2963, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16>
%int1_3231 = torch.constant.int 1
%2964 = torch.aten.size.int %2844, %int1_3231 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int1_3232 = torch.constant.int 1
%int32_3233 = torch.constant.int 32
%int100_3234 = torch.constant.int 100
%2965 = torch.prim.ListConstruct %int1_3232, %int32_3233, %2964, %int100_3234 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_3235 = torch.constant.bool false
%2966 = torch.aten.expand %2942, %2965, %false_3235 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2966, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int32_3236 = torch.constant.int 32
%int100_3237 = torch.constant.int 100
%2967 = torch.prim.ListConstruct %int32_3236, %2964, %int100_3237 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2968 = torch.aten.view %2966, %2967 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %2968, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%2969 = torch.aten.bmm %2963, %2968 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %2969, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%int1_3238 = torch.constant.int 1
%int32_3239 = torch.constant.int 32
%int100_3240 = torch.constant.int 100
%2970 = torch.prim.ListConstruct %int1_3238, %int32_3239, %2865, %int100_3240 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2971 = torch.aten.view %2969, %2970 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %2971, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int1_3241 = torch.constant.int 1
%int2_3242 = torch.constant.int 2
%2972 = torch.aten.transpose.int %2971, %int1_3241, %int2_3242 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2972, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int0_3243 = torch.constant.int 0
%2973 = torch.aten.clone %2972, %int0_3243 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %2973, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_3244 = torch.constant.int 1
%int3200_3245 = torch.constant.int 3200
%2974 = torch.prim.ListConstruct %int1_3244, %2865, %int3200_3245 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2975 = torch.aten._unsafe_view %2973, %2974 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2975, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
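// Attention output projection (%122), followed by the residual add onto the block input.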
%int-2_3246 = torch.constant.int -2
%int-1_3247 = torch.constant.int -1
%2976 = torch.aten.transpose.int %122, %int-2_3246, %int-1_3247 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_3248 = torch.constant.int 3200
%2977 = torch.prim.ListConstruct %2865, %int3200_3248 : (!torch.int, !torch.int) -> !torch.list<int>
%2978 = torch.aten.view %2975, %2977 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2978, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%2979 = torch.aten.mm %2978, %2976 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2979, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_3249 = torch.constant.int 1
%int3200_3250 = torch.constant.int 3200
%2980 = torch.prim.ListConstruct %int1_3249, %2865, %int3200_3250 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2981 = torch.aten.view %2979, %2980 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2981, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_3251 = torch.constant.int 1
%2982 = torch.aten.add.Tensor %2817, %2981, %int1_3251 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2982, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
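// RMSNorm over the hidden dimension, computed in f32: mean of squares, add eps (~1e-6),
// rsqrt, then scale by the norm weight (%123).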
%int6_3252 = torch.constant.int 6
%2983 = torch.prims.convert_element_type %2982, %int6_3252 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2983, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int2_3253 = torch.constant.int 2
%2984 = torch.aten.pow.Tensor_Scalar %2983, %int2_3253 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2984, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int-1_3254 = torch.constant.int -1
%2985 = torch.prim.ListConstruct %int-1_3254 : (!torch.int) -> !torch.list<int>
%true_3255 = torch.constant.bool true
%none_3256 = torch.constant.none
%2986 = torch.aten.mean.dim %2984, %2985, %true_3255, %none_3256 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %2986, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%float9.999990e-07_3257 = torch.constant.float 9.9999999747524271E-7
%int1_3258 = torch.constant.int 1
%2987 = torch.aten.add.Scalar %2986, %float9.999990e-07_3257, %int1_3258 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %2987, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%2988 = torch.aten.rsqrt %2987 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %2988, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%2989 = torch.aten.mul.Tensor %2983, %2988 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2989, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%2990 = torch.aten.mul.Tensor %123, %2989 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %2990, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int5_3259 = torch.constant.int 5
%2991 = torch.prims.convert_element_type %2990, %int5_3259 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %2991, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
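// Gated feed-forward (SwiGLU-style): y = (silu(x @ W_gate^T) * (x @ W_up^T)) @ W_down^T,
// with the 3200 -> 8640 -> 3200 projections %124, %125, and %126, plus a residual add.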
%int-2_3260 = torch.constant.int -2
%int-1_3261 = torch.constant.int -1
%2992 = torch.aten.transpose.int %124, %int-2_3260, %int-1_3261 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16>
%int3200_3262 = torch.constant.int 3200
%2993 = torch.prim.ListConstruct %240, %int3200_3262 : (!torch.int, !torch.int) -> !torch.list<int>
%2994 = torch.aten.view %2991, %2993 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %2994, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%2995 = torch.aten.mm %2994, %2992 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %2995, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%int1_3263 = torch.constant.int 1
%int8640_3264 = torch.constant.int 8640
%2996 = torch.prim.ListConstruct %int1_3263, %240, %int8640_3264 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2997 = torch.aten.view %2995, %2996 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %2997, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%2998 = torch.aten.silu %2997 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %2998, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%int-2_3265 = torch.constant.int -2
%int-1_3266 = torch.constant.int -1
%2999 = torch.aten.transpose.int %125, %int-2_3265, %int-1_3266 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16>
%int3200_3267 = torch.constant.int 3200
%3000 = torch.prim.ListConstruct %240, %int3200_3267 : (!torch.int, !torch.int) -> !torch.list<int>
%3001 = torch.aten.view %2991, %3000 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3001, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%3002 = torch.aten.mm %3001, %2999 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %3002, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%int1_3268 = torch.constant.int 1
%int8640_3269 = torch.constant.int 8640
%3003 = torch.prim.ListConstruct %int1_3268, %240, %int8640_3269 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3004 = torch.aten.view %3002, %3003 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %3004, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%3005 = torch.aten.mul.Tensor %2998, %3004 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %3005, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%int-2_3270 = torch.constant.int -2
%int-1_3271 = torch.constant.int -1
%3006 = torch.aten.transpose.int %126, %int-2_3270, %int-1_3271 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16>
%int1_3272 = torch.constant.int 1
%3007 = torch.aten.size.int %2997, %int1_3272 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int
%int8640_3273 = torch.constant.int 8640
%3008 = torch.prim.ListConstruct %3007, %int8640_3273 : (!torch.int, !torch.int) -> !torch.list<int>
%3009 = torch.aten.view %3005, %3008 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %3009, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%3010 = torch.aten.mm %3009, %3006 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3010, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_3274 = torch.constant.int 1
%int3200_3275 = torch.constant.int 3200
%3011 = torch.prim.ListConstruct %int1_3274, %3007, %int3200_3275 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3012 = torch.aten.view %3010, %3011 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3012, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_3276 = torch.constant.int 1
%3013 = torch.aten.add.Tensor %2982, %3012, %int1_3276 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3013, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
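// Next decoder block: input RMSNorm (weight %127), then fresh Q/K/V projections
// from %128, %129, and %130.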
%int6_3277 = torch.constant.int 6
%3014 = torch.prims.convert_element_type %3013, %int6_3277 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %3014, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int2_3278 = torch.constant.int 2
%3015 = torch.aten.pow.Tensor_Scalar %3014, %int2_3278 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %3015, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int-1_3279 = torch.constant.int -1
%3016 = torch.prim.ListConstruct %int-1_3279 : (!torch.int) -> !torch.list<int>
%true_3280 = torch.constant.bool true
%none_3281 = torch.constant.none
%3017 = torch.aten.mean.dim %3015, %3016, %true_3280, %none_3281 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %3017, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%float9.999990e-07_3282 = torch.constant.float 9.9999999747524271E-7
%int1_3283 = torch.constant.int 1
%3018 = torch.aten.add.Scalar %3017, %float9.999990e-07_3282, %int1_3283 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %3018, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%3019 = torch.aten.rsqrt %3018 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %3019, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%3020 = torch.aten.mul.Tensor %3014, %3019 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %3020, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%3021 = torch.aten.mul.Tensor %127, %3020 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %3021, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int5_3284 = torch.constant.int 5
%3022 = torch.prims.convert_element_type %3021, %int5_3284 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3022, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_3285 = torch.constant.int -2
%int-1_3286 = torch.constant.int -1
%3023 = torch.aten.transpose.int %128, %int-2_3285, %int-1_3286 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_3287 = torch.constant.int 3200
%3024 = torch.prim.ListConstruct %240, %int3200_3287 : (!torch.int, !torch.int) -> !torch.list<int>
%3025 = torch.aten.view %3022, %3024 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3025, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%3026 = torch.aten.mm %3025, %3023 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3026, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_3288 = torch.constant.int 1
%int3200_3289 = torch.constant.int 3200
%3027 = torch.prim.ListConstruct %int1_3288, %240, %int3200_3289 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3028 = torch.aten.view %3026, %3027 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3028, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_3290 = torch.constant.int -2
%int-1_3291 = torch.constant.int -1
%3029 = torch.aten.transpose.int %129, %int-2_3290, %int-1_3291 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_3292 = torch.constant.int 3200
%3030 = torch.prim.ListConstruct %240, %int3200_3292 : (!torch.int, !torch.int) -> !torch.list<int>
%3031 = torch.aten.view %3022, %3030 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3031, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%3032 = torch.aten.mm %3031, %3029 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3032, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_3293 = torch.constant.int 1
%int3200_3294 = torch.constant.int 3200
%3033 = torch.prim.ListConstruct %int1_3293, %240, %int3200_3294 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3034 = torch.aten.view %3032, %3033 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3034, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int-2_3295 = torch.constant.int -2
%int-1_3296 = torch.constant.int -1
%3035 = torch.aten.transpose.int %130, %int-2_3295, %int-1_3296 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_3297 = torch.constant.int 3200
%3036 = torch.prim.ListConstruct %240, %int3200_3297 : (!torch.int, !torch.int) -> !torch.list<int>
%3037 = torch.aten.view %3022, %3036 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3037, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%3038 = torch.aten.mm %3037, %3035 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3038, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_3298 = torch.constant.int 1
%int3200_3299 = torch.constant.int 3200
%3039 = torch.prim.ListConstruct %int1_3298, %240, %int3200_3299 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3040 = torch.aten.view %3038, %3039 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3040, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
%int1_3300 = torch.constant.int 1
%int32_3301 = torch.constant.int 32
%int100_3302 = torch.constant.int 100
%3041 = torch.prim.ListConstruct %int1_3300, %240, %int32_3301, %int100_3302 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3042 = torch.aten.view %3028, %3041 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %3042, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_3303 = torch.constant.int 1
%int32_3304 = torch.constant.int 32
%int100_3305 = torch.constant.int 100
%3043 = torch.prim.ListConstruct %int1_3303, %240, %int32_3304, %int100_3305 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3044 = torch.aten.view %3034, %3043 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %3044, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_3306 = torch.constant.int 1
%int32_3307 = torch.constant.int 32
%int100_3308 = torch.constant.int 100
%3045 = torch.prim.ListConstruct %int1_3306, %240, %int32_3307, %int100_3308 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3046 = torch.aten.view %3040, %3045 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %3046, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
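// As in the previous block, the RoPE table is rebuilt and applied first to the new
// queries (%3042 -> %3075) and then to the new keys (%3044 onward).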
%int2048_3309 = torch.constant.int 2048 | |
%none_3310 = torch.constant.none | |
%none_3311 = torch.constant.none | |
%cpu_3312 = torch.constant.device "cpu" | |
%false_3313 = torch.constant.bool false | |
%3047 = torch.aten.arange %int2048_3309, %none_3310, %none_3311, %cpu_3312, %false_3313 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_3314 = torch.constant.int 0 | |
%int100_3315 = torch.constant.int 100 | |
%int2_3316 = torch.constant.int 2 | |
%none_3317 = torch.constant.none | |
%none_3318 = torch.constant.none | |
%cpu_3319 = torch.constant.device "cpu" | |
%false_3320 = torch.constant.bool false | |
%3048 = torch.aten.arange.start_step %int0_3314, %int100_3315, %int2_3316, %none_3317, %none_3318, %cpu_3319, %false_3320 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_3321 = torch.constant.int 0 | |
%int0_3322 = torch.constant.int 0 | |
%int50_3323 = torch.constant.int 50 | |
%int1_3324 = torch.constant.int 1 | |
%3049 = torch.aten.slice.Tensor %3048, %int0_3321, %int0_3322, %int50_3323, %int1_3324 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_3325 = torch.constant.int 6 | |
%3050 = torch.prims.convert_element_type %3049, %int6_3325 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_3326 = torch.constant.int 100 | |
%3051 = torch.aten.div.Scalar %3050, %int100_3326 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_3327 = torch.constant.float 1.000000e+04 | |
%3052 = torch.aten.pow.Scalar %float1.000000e04_3327, %3051 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%3053 = torch.aten.reciprocal %3052 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_3328 = torch.constant.float 1.000000e+00 | |
%3054 = torch.aten.mul.Scalar %3053, %float1.000000e00_3328 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_3329 = torch.constant.int 2048 | |
%int1_3330 = torch.constant.int 1 | |
%3055 = torch.prim.ListConstruct %int2048_3329, %int1_3330 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3056 = torch.aten.view %3047, %3055 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%3057 = torch.aten.mul.Tensor %3056, %3054 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3058 = torch.aten.cos %3057 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3059 = torch.aten.sin %3057 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3060 = torch.aten.complex %3058, %3059 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_3331 = torch.constant.int 1 | |
%3061 = torch.aten.size.int %3028, %int1_3331 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_3332 = torch.constant.int 0 | |
%3062 = torch.aten.add.int %int0_3332, %3061 : !torch.int, !torch.int -> !torch.int | |
%int0_3333 = torch.constant.int 0 | |
%int0_3334 = torch.constant.int 0 | |
%int1_3335 = torch.constant.int 1 | |
%3063 = torch.aten.slice.Tensor %3060, %int0_3333, %int0_3334, %3062, %int1_3335 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3063, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_3336 = torch.constant.int 1 | |
%int0_3337 = torch.constant.int 0 | |
%int9223372036854775807_3338 = torch.constant.int 9223372036854775807 | |
%int1_3339 = torch.constant.int 1 | |
%3064 = torch.aten.slice.Tensor %3063, %int1_3336, %int0_3337, %int9223372036854775807_3338, %int1_3339 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3064, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_3340 = torch.constant.int 0 | |
%3065 = torch.aten.unsqueeze %3064, %int0_3340 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %3065, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_3341 = torch.constant.int 2 | |
%3066 = torch.aten.unsqueeze %3065, %int2_3341 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3066, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_3342 = torch.constant.int 3 | |
%int0_3343 = torch.constant.int 0 | |
%int9223372036854775807_3344 = torch.constant.int 9223372036854775807 | |
%int1_3345 = torch.constant.int 1 | |
%3067 = torch.aten.slice.Tensor %3066, %int3_3342, %int0_3343, %int9223372036854775807_3344, %int1_3345 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3067, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%3068 = torch_c.to_builtin_tensor %3042 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_3346 = arith.constant 1 : index | |
%dim_3347 = tensor.dim %3068, %c1_3346 : tensor<1x?x32x100xf16> | |
%3069 = flow.tensor.bitcast %3068 : tensor<1x?x32x100xf16>{%dim_3347} -> tensor<1x?x32x50xcomplex<f16>>{%dim_3347} | |
%3070 = torch_c.from_builtin_tensor %3069 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %3070, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%3071 = torch.aten.mul.Tensor %3070, %3067 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %3071, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%3072 = torch_c.to_builtin_tensor %3071 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_3348 = arith.constant 1 : index | |
%dim_3349 = tensor.dim %3072, %c1_3348 : tensor<1x?x32x50xcomplex<f32>> | |
%3073 = flow.tensor.bitcast %3072 : tensor<1x?x32x50xcomplex<f32>>{%dim_3349} -> tensor<1x?x32x100xf32>{%dim_3349} | |
%3074 = torch_c.from_builtin_tensor %3073 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %3074, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_3350 = torch.constant.int 5 | |
%3075 = torch.prims.convert_element_type %3074, %int5_3350 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3075, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int2048_3351 = torch.constant.int 2048 | |
%none_3352 = torch.constant.none | |
%none_3353 = torch.constant.none | |
%cpu_3354 = torch.constant.device "cpu" | |
%false_3355 = torch.constant.bool false | |
%3076 = torch.aten.arange %int2048_3351, %none_3352, %none_3353, %cpu_3354, %false_3355 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_3356 = torch.constant.int 0 | |
%int100_3357 = torch.constant.int 100 | |
%int2_3358 = torch.constant.int 2 | |
%none_3359 = torch.constant.none | |
%none_3360 = torch.constant.none | |
%cpu_3361 = torch.constant.device "cpu" | |
%false_3362 = torch.constant.bool false | |
%3077 = torch.aten.arange.start_step %int0_3356, %int100_3357, %int2_3358, %none_3359, %none_3360, %cpu_3361, %false_3362 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_3363 = torch.constant.int 0 | |
%int0_3364 = torch.constant.int 0 | |
%int50_3365 = torch.constant.int 50 | |
%int1_3366 = torch.constant.int 1 | |
%3078 = torch.aten.slice.Tensor %3077, %int0_3363, %int0_3364, %int50_3365, %int1_3366 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_3367 = torch.constant.int 6 | |
%3079 = torch.prims.convert_element_type %3078, %int6_3367 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_3368 = torch.constant.int 100 | |
%3080 = torch.aten.div.Scalar %3079, %int100_3368 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_3369 = torch.constant.float 1.000000e+04 | |
%3081 = torch.aten.pow.Scalar %float1.000000e04_3369, %3080 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%3082 = torch.aten.reciprocal %3081 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_3370 = torch.constant.float 1.000000e+00 | |
%3083 = torch.aten.mul.Scalar %3082, %float1.000000e00_3370 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_3371 = torch.constant.int 2048 | |
%int1_3372 = torch.constant.int 1 | |
%3084 = torch.prim.ListConstruct %int2048_3371, %int1_3372 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3085 = torch.aten.view %3076, %3084 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%3086 = torch.aten.mul.Tensor %3085, %3083 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3087 = torch.aten.cos %3086 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3088 = torch.aten.sin %3086 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3089 = torch.aten.complex %3087, %3088 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_3373 = torch.constant.int 1 | |
%3090 = torch.aten.size.int %3034, %int1_3373 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_3374 = torch.constant.int 0 | |
%3091 = torch.aten.add.int %int0_3374, %3090 : !torch.int, !torch.int -> !torch.int | |
%int0_3375 = torch.constant.int 0 | |
%int0_3376 = torch.constant.int 0 | |
%int1_3377 = torch.constant.int 1 | |
%3092 = torch.aten.slice.Tensor %3089, %int0_3375, %int0_3376, %3091, %int1_3377 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3092, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_3378 = torch.constant.int 1 | |
%int0_3379 = torch.constant.int 0 | |
%int9223372036854775807_3380 = torch.constant.int 9223372036854775807 | |
%int1_3381 = torch.constant.int 1 | |
%3093 = torch.aten.slice.Tensor %3092, %int1_3378, %int0_3379, %int9223372036854775807_3380, %int1_3381 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3093, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_3382 = torch.constant.int 0 | |
%3094 = torch.aten.unsqueeze %3093, %int0_3382 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %3094, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_3383 = torch.constant.int 2 | |
%3095 = torch.aten.unsqueeze %3094, %int2_3383 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3095, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_3384 = torch.constant.int 3 | |
%int0_3385 = torch.constant.int 0 | |
%int9223372036854775807_3386 = torch.constant.int 9223372036854775807 | |
%int1_3387 = torch.constant.int 1 | |
%3096 = torch.aten.slice.Tensor %3095, %int3_3384, %int0_3385, %int9223372036854775807_3386, %int1_3387 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3096, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
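// Apply the rotary embedding to the key tensor %3044: bitcast the f16
// [1,seq,32,100] tensor to complex<f16> [1,seq,32,50], multiply by the
// broadcast freqs_cis (promoting to complex<f32>), bitcast back to f32
// [1,seq,32,100], and truncate to f16. In spirit this is PyTorch's
//   torch.view_as_real(torch.view_as_complex(k) * freqs_cis)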
%3097 = torch_c.to_builtin_tensor %3044 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_3388 = arith.constant 1 : index | |
%dim_3389 = tensor.dim %3097, %c1_3388 : tensor<1x?x32x100xf16> | |
%3098 = flow.tensor.bitcast %3097 : tensor<1x?x32x100xf16>{%dim_3389} -> tensor<1x?x32x50xcomplex<f16>>{%dim_3389} | |
%3099 = torch_c.from_builtin_tensor %3098 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %3099, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%3100 = torch.aten.mul.Tensor %3099, %3096 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %3100, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%3101 = torch_c.to_builtin_tensor %3100 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_3390 = arith.constant 1 : index | |
%dim_3391 = tensor.dim %3101, %c1_3390 : tensor<1x?x32x50xcomplex<f32>> | |
%3102 = flow.tensor.bitcast %3101 : tensor<1x?x32x50xcomplex<f32>>{%dim_3391} -> tensor<1x?x32x100xf32>{%dim_3391} | |
%3103 = torch_c.from_builtin_tensor %3102 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %3103, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_3392 = torch.constant.int 5 | |
%3104 = torch.prims.convert_element_type %3103, %int5_3392 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3104, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
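// Paged KV-cache write indices. %arg2 holds this sequence's page ids; each
// cache page stores 26 layers x 2 (K then V) sub-blocks of
// [16 tokens, 32 heads, 100 dims], so a page's linear row index is
// page_id * 52 + layer * 2 (+1 for the V row). The offset 28 below is
// consistent with this being layer 14 of the 26-layer model (28 = 14 * 2);
// the layer number is inferred from the constant, not stated in the IR.
// K (%3104) and V (%3046) are then reshaped into per-page rows of
// [16, 32, 100] and the index tensors flattened to match.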
%int52_3393 = torch.constant.int 52 | |
%3105 = torch.aten.mul.Scalar %arg2, %int52_3393 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %3105, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int28 = torch.constant.int 28 | |
%int1_3394 = torch.constant.int 1 | |
%3106 = torch.aten.add.Scalar %3105, %int28, %int1_3394 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %3106, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_3395 = torch.constant.int 1 | |
%int16_3396 = torch.constant.int 16 | |
%int32_3397 = torch.constant.int 32 | |
%int100_3398 = torch.constant.int 100 | |
%3107 = torch.prim.ListConstruct %int1_3395, %368, %int16_3396, %int32_3397, %int100_3398 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3108 = torch.aten.view %3104, %3107 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %3108, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_3399 = torch.constant.int 16 | |
%int32_3400 = torch.constant.int 32 | |
%int100_3401 = torch.constant.int 100 | |
%3109 = torch.prim.ListConstruct %368, %int16_3399, %int32_3400, %int100_3401 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3110 = torch.aten.view %3108, %3109 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3110, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%3111 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%3112 = torch.aten.view %3106, %3111 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %3112, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_3402 = torch.constant.int 1 | |
%int16_3403 = torch.constant.int 16 | |
%int32_3404 = torch.constant.int 32 | |
%int100_3405 = torch.constant.int 100 | |
%3113 = torch.prim.ListConstruct %int1_3402, %368, %int16_3403, %int32_3404, %int100_3405 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3114 = torch.aten.view %3046, %3113 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %3114, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_3406 = torch.constant.int 16 | |
%int32_3407 = torch.constant.int 32 | |
%int100_3408 = torch.constant.int 100 | |
%3115 = torch.prim.ListConstruct %368, %int16_3406, %int32_3407, %int100_3408 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3116 = torch.aten.view %3114, %3115 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3116, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_3409 = torch.constant.int 1 | |
%int1_3410 = torch.constant.int 1 | |
%3117 = torch.aten.add.Scalar %3106, %int1_3409, %int1_3410 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %3117, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%3118 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%3119 = torch.aten.view %3117, %3118 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %3119, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%3120 = torch.prim.ListConstruct %3112, %3119 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_3411 = torch.constant.int 0 | |
%3121 = torch.aten.cat %3120, %int0_3411 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %3121, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%3122 = torch.prim.ListConstruct %3110, %3116 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_3412 = torch.constant.int 0 | |
%3123 = torch.aten.cat %3122, %int0_3412 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3123, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
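// Scatter the new K and V pages into the flat cache %2939. Each cache row of
// [?, 2662400] is viewed as [pages, 26, 2, 16, 32, 100]
// (26 * 2 * 16 * 32 * 100 = 2,662,400), collapsed to [pages*52, 16, 32, 100],
// updated via index_put (indices %3121 select the K and V rows computed
// above), and viewed back to the flat [?, 2662400] layout as %3135.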
%int26_3413 = torch.constant.int 26 | |
%int2_3414 = torch.constant.int 2 | |
%int16_3415 = torch.constant.int 16 | |
%int32_3416 = torch.constant.int 32 | |
%int100_3417 = torch.constant.int 100 | |
%3124 = torch.prim.ListConstruct %359, %int26_3413, %int2_3414, %int16_3415, %int32_3416, %int100_3417 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3125 = torch.aten.view %2939, %3124 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %3125, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_3418 = torch.constant.int 26 | |
%3126 = torch.aten.mul.int %359, %int26_3418 : !torch.int, !torch.int -> !torch.int | |
%int2_3419 = torch.constant.int 2 | |
%3127 = torch.aten.mul.int %3126, %int2_3419 : !torch.int, !torch.int -> !torch.int | |
%int16_3420 = torch.constant.int 16 | |
%int32_3421 = torch.constant.int 32 | |
%int100_3422 = torch.constant.int 100 | |
%3128 = torch.prim.ListConstruct %3127, %int16_3420, %int32_3421, %int100_3422 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3129 = torch.aten.view %3125, %3128 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3129, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%3130 = torch.prim.ListConstruct %3121 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_3423 = torch.constant.bool false | |
%3131 = torch.aten.index_put %3129, %3130, %3123, %false_3423 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3131, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_3424 = torch.constant.int 26 | |
%int2_3425 = torch.constant.int 2 | |
%int16_3426 = torch.constant.int 16 | |
%int32_3427 = torch.constant.int 32 | |
%int100_3428 = torch.constant.int 100 | |
%3132 = torch.prim.ListConstruct %359, %int26_3424, %int2_3425, %int16_3426, %int32_3427, %int100_3428 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3133 = torch.aten.view %3131, %3132 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %3133, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_3429 = torch.constant.int 2662400 | |
%3134 = torch.prim.ListConstruct %359, %int2662400_3429 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3135 = torch.aten.view %3133, %3134 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %3135, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
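// Attention for this block: transpose Q (%3075), K (%3104), and V (%3046)
// from [1, seq, 32, 100] to [1, 32, seq, 100], transpose K once more to
// [1, 32, 100, seq], and batch-matmul Q @ K^T over the 32 heads to produce
// [1, 32, seq, seq] attention scores.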
%int1_3430 = torch.constant.int 1 | |
%int2_3431 = torch.constant.int 2 | |
%3136 = torch.aten.transpose.int %3075, %int1_3430, %int2_3431 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3136, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_3432 = torch.constant.int 1 | |
%int2_3433 = torch.constant.int 2 | |
%3137 = torch.aten.transpose.int %3104, %int1_3432, %int2_3433 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3137, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_3434 = torch.constant.int 1 | |
%int2_3435 = torch.constant.int 2 | |
%3138 = torch.aten.transpose.int %3046, %int1_3434, %int2_3435 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3138, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_3436 = torch.constant.int 2 | |
%int3_3437 = torch.constant.int 3 | |
%3139 = torch.aten.transpose.int %3137, %int2_3436, %int3_3437 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %3139, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_3438 = torch.constant.int 1 | |
%int32_3439 = torch.constant.int 32 | |
%int100_3440 = torch.constant.int 100 | |
%3140 = torch.prim.ListConstruct %int1_3438, %int32_3439, %3061, %int100_3440 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3441 = torch.constant.bool false | |
%3141 = torch.aten.expand %3136, %3140, %false_3441 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3141, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_3442 = torch.constant.int 32 | |
%int100_3443 = torch.constant.int 100 | |
%3142 = torch.prim.ListConstruct %int32_3442, %3061, %int100_3443 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3143 = torch.aten.view %3141, %3142 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %3143, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_3444 = torch.constant.int 1 | |
%int32_3445 = torch.constant.int 32 | |
%int100_3446 = torch.constant.int 100 | |
%3144 = torch.prim.ListConstruct %int1_3444, %int32_3445, %int100_3446, %3090 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3447 = torch.constant.bool false | |
%3145 = torch.aten.expand %3139, %3144, %false_3447 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %3145, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_3448 = torch.constant.int 32 | |
%int100_3449 = torch.constant.int 100 | |
%3146 = torch.prim.ListConstruct %int32_3448, %int100_3449, %3090 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3147 = torch.aten.view %3145, %3146 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %3147, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%3148 = torch.aten.bmm %3143, %3147 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %3148, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_3450 = torch.constant.int 1 | |
%int32_3451 = torch.constant.int 32 | |
%3149 = torch.prim.ListConstruct %int1_3450, %int32_3451, %3061, %3090 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3150 = torch.aten.view %3148, %3149 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3150, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
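// Scale and normalize the scores: divide by 10.0 (= sqrt(head_dim 100)),
// add the attention mask %266, softmax along the last dim in f32 for
// numerical stability, then cast back to f16 and batch-matmul with V.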
%float1.000000e01_3452 = torch.constant.float 1.000000e+01 | |
%3151 = torch.aten.div.Scalar %3150, %float1.000000e01_3452 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3151, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_3453 = torch.constant.int 1 | |
%3152 = torch.aten.add.Tensor %3151, %266, %int1_3453 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3152, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_3454 = torch.constant.int 6 | |
%3153 = torch.prims.convert_element_type %3152, %int6_3454 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %3153, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_3455 = torch.constant.int -1 | |
%false_3456 = torch.constant.bool false | |
%3154 = torch.aten._softmax %3153, %int-1_3455, %false_3456 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %3154, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_3457 = torch.constant.int 5 | |
%3155 = torch.prims.convert_element_type %3154, %int5_3457 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3155, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_3458 = torch.constant.int 1 | |
%int32_3459 = torch.constant.int 32 | |
%3156 = torch.prim.ListConstruct %int1_3458, %int32_3459, %3061, %3090 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3460 = torch.constant.bool false | |
%3157 = torch.aten.expand %3155, %3156, %false_3460 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3157, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_3461 = torch.constant.int 32 | |
%3158 = torch.prim.ListConstruct %int32_3461, %3061, %3090 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3159 = torch.aten.view %3157, %3158 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %3159, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_3462 = torch.constant.int 1 | |
%3160 = torch.aten.size.int %3040, %int1_3462 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_3463 = torch.constant.int 1 | |
%int32_3464 = torch.constant.int 32 | |
%int100_3465 = torch.constant.int 100 | |
%3161 = torch.prim.ListConstruct %int1_3463, %int32_3464, %3160, %int100_3465 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3466 = torch.constant.bool false | |
%3162 = torch.aten.expand %3138, %3161, %false_3466 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3162, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_3467 = torch.constant.int 32 | |
%int100_3468 = torch.constant.int 100 | |
%3163 = torch.prim.ListConstruct %int32_3467, %3160, %int100_3468 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3164 = torch.aten.view %3162, %3163 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %3164, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%3165 = torch.aten.bmm %3159, %3164 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %3165, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_3469 = torch.constant.int 1 | |
%int32_3470 = torch.constant.int 32 | |
%int100_3471 = torch.constant.int 100 | |
%3166 = torch.prim.ListConstruct %int1_3469, %int32_3470, %3061, %int100_3471 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3167 = torch.aten.view %3165, %3166 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3167, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_3472 = torch.constant.int 1 | |
%int2_3473 = torch.constant.int 2 | |
%3168 = torch.aten.transpose.int %3167, %int1_3472, %int2_3473 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3168, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_3474 = torch.constant.int 0 | |
%3169 = torch.aten.clone %3168, %int0_3474 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3169, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_3475 = torch.constant.int 1 | |
%int3200_3476 = torch.constant.int 3200 | |
%3170 = torch.prim.ListConstruct %int1_3475, %3061, %int3200_3476 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3171 = torch.aten._unsafe_view %3169, %3170 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3171, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
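// Attention output projection and residual: the scores @ V result was
// transposed and reshaped back to [1, seq, 3200]; multiply by the transposed
// attn_output weight %131 and add the block input %3013.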
%int-2_3477 = torch.constant.int -2 | |
%int-1_3478 = torch.constant.int -1 | |
%3172 = torch.aten.transpose.int %131, %int-2_3477, %int-1_3478 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3479 = torch.constant.int 3200 | |
%3173 = torch.prim.ListConstruct %3061, %int3200_3479 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3174 = torch.aten.view %3171, %3173 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3174, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3175 = torch.aten.mm %3174, %3172 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3175, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3480 = torch.constant.int 1 | |
%int3200_3481 = torch.constant.int 3200 | |
%3176 = torch.prim.ListConstruct %int1_3480, %3061, %int3200_3481 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3177 = torch.aten.view %3175, %3176 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3177, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_3482 = torch.constant.int 1 | |
%3178 = torch.aten.add.Tensor %3013, %3177, %int1_3482 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3178, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
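// RMSNorm with the ffn_norm weight %132, computed in f32 (eps ~= 1e-6):
//   y = x / sqrt(mean(x^2, dim=-1, keepdim=True) + eps) * weight
// then truncated back to f16.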
%int6_3483 = torch.constant.int 6 | |
%3179 = torch.prims.convert_element_type %3178, %int6_3483 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3179, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_3484 = torch.constant.int 2 | |
%3180 = torch.aten.pow.Tensor_Scalar %3179, %int2_3484 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3180, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_3485 = torch.constant.int -1 | |
%3181 = torch.prim.ListConstruct %int-1_3485 : (!torch.int) -> !torch.list<int> | |
%true_3486 = torch.constant.bool true | |
%none_3487 = torch.constant.none | |
%3182 = torch.aten.mean.dim %3180, %3181, %true_3486, %none_3487 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3182, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_3488 = torch.constant.float 9.9999999747524271E-7 | |
%int1_3489 = torch.constant.int 1 | |
%3183 = torch.aten.add.Scalar %3182, %float9.999990e-07_3488, %int1_3489 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3183, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%3184 = torch.aten.rsqrt %3183 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3184, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%3185 = torch.aten.mul.Tensor %3179, %3184 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3185, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%3186 = torch.aten.mul.Tensor %132, %3185 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3186, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_3490 = torch.constant.int 5 | |
%3187 = torch.prims.convert_element_type %3186, %int5_3490 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3187, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
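// SwiGLU feed-forward (3200 -> 8640 -> 3200):
//   (silu(x @ W_gate^T) * (x @ W_up^T)) @ W_down^T
// with gate %133, up %134, and down %135, followed by the second residual add.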
%int-2_3491 = torch.constant.int -2 | |
%int-1_3492 = torch.constant.int -1 | |
%3188 = torch.aten.transpose.int %133, %int-2_3491, %int-1_3492 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_3493 = torch.constant.int 3200 | |
%3189 = torch.prim.ListConstruct %240, %int3200_3493 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3190 = torch.aten.view %3187, %3189 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3190, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3191 = torch.aten.mm %3190, %3188 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %3191, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_3494 = torch.constant.int 1 | |
%int8640_3495 = torch.constant.int 8640 | |
%3192 = torch.prim.ListConstruct %int1_3494, %240, %int8640_3495 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3193 = torch.aten.view %3191, %3192 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %3193, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%3194 = torch.aten.silu %3193 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %3194, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_3496 = torch.constant.int -2 | |
%int-1_3497 = torch.constant.int -1 | |
%3195 = torch.aten.transpose.int %134, %int-2_3496, %int-1_3497 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_3498 = torch.constant.int 3200 | |
%3196 = torch.prim.ListConstruct %240, %int3200_3498 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3197 = torch.aten.view %3187, %3196 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3197, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3198 = torch.aten.mm %3197, %3195 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %3198, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_3499 = torch.constant.int 1 | |
%int8640_3500 = torch.constant.int 8640 | |
%3199 = torch.prim.ListConstruct %int1_3499, %240, %int8640_3500 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3200 = torch.aten.view %3198, %3199 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %3200, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%3201 = torch.aten.mul.Tensor %3194, %3200 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %3201, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_3501 = torch.constant.int -2 | |
%int-1_3502 = torch.constant.int -1 | |
%3202 = torch.aten.transpose.int %135, %int-2_3501, %int-1_3502 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_3503 = torch.constant.int 1 | |
%3203 = torch.aten.size.int %3193, %int1_3503 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_3504 = torch.constant.int 8640 | |
%3204 = torch.prim.ListConstruct %3203, %int8640_3504 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3205 = torch.aten.view %3201, %3204 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %3205, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%3206 = torch.aten.mm %3205, %3202 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3206, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3505 = torch.constant.int 1 | |
%int3200_3506 = torch.constant.int 3200 | |
%3207 = torch.prim.ListConstruct %int1_3505, %3203, %int3200_3506 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3208 = torch.aten.view %3206, %3207 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3208, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_3507 = torch.constant.int 1 | |
%3209 = torch.aten.add.Tensor %3178, %3208, %int1_3507 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3209, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
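// End of this transformer block. The next block repeats the same pattern:
// an attn_norm-style RMSNorm with weight %136, then Q/K/V projections with
// %137, %138, and %139 ([3200, 3200] each, transposed before the matmul).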
%int6_3508 = torch.constant.int 6 | |
%3210 = torch.prims.convert_element_type %3209, %int6_3508 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3210, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_3509 = torch.constant.int 2 | |
%3211 = torch.aten.pow.Tensor_Scalar %3210, %int2_3509 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3211, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_3510 = torch.constant.int -1 | |
%3212 = torch.prim.ListConstruct %int-1_3510 : (!torch.int) -> !torch.list<int> | |
%true_3511 = torch.constant.bool true | |
%none_3512 = torch.constant.none | |
%3213 = torch.aten.mean.dim %3211, %3212, %true_3511, %none_3512 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3213, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_3513 = torch.constant.float 9.9999999747524271E-7 | |
%int1_3514 = torch.constant.int 1 | |
%3214 = torch.aten.add.Scalar %3213, %float9.999990e-07_3513, %int1_3514 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3214, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%3215 = torch.aten.rsqrt %3214 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3215, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%3216 = torch.aten.mul.Tensor %3210, %3215 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3216, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%3217 = torch.aten.mul.Tensor %136, %3216 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3217, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_3515 = torch.constant.int 5 | |
%3218 = torch.prims.convert_element_type %3217, %int5_3515 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3218, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_3516 = torch.constant.int -2 | |
%int-1_3517 = torch.constant.int -1 | |
%3219 = torch.aten.transpose.int %137, %int-2_3516, %int-1_3517 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3518 = torch.constant.int 3200 | |
%3220 = torch.prim.ListConstruct %240, %int3200_3518 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3221 = torch.aten.view %3218, %3220 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3221, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3222 = torch.aten.mm %3221, %3219 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3222, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3519 = torch.constant.int 1 | |
%int3200_3520 = torch.constant.int 3200 | |
%3223 = torch.prim.ListConstruct %int1_3519, %240, %int3200_3520 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3224 = torch.aten.view %3222, %3223 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3224, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_3521 = torch.constant.int -2 | |
%int-1_3522 = torch.constant.int -1 | |
%3225 = torch.aten.transpose.int %138, %int-2_3521, %int-1_3522 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3523 = torch.constant.int 3200 | |
%3226 = torch.prim.ListConstruct %240, %int3200_3523 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3227 = torch.aten.view %3218, %3226 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3227, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3228 = torch.aten.mm %3227, %3225 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3228, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3524 = torch.constant.int 1 | |
%int3200_3525 = torch.constant.int 3200 | |
%3229 = torch.prim.ListConstruct %int1_3524, %240, %int3200_3525 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3230 = torch.aten.view %3228, %3229 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3230, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_3526 = torch.constant.int -2 | |
%int-1_3527 = torch.constant.int -1 | |
%3231 = torch.aten.transpose.int %139, %int-2_3526, %int-1_3527 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3528 = torch.constant.int 3200 | |
%3232 = torch.prim.ListConstruct %240, %int3200_3528 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3233 = torch.aten.view %3218, %3232 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3233, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3234 = torch.aten.mm %3233, %3231 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3234, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3529 = torch.constant.int 1 | |
%int3200_3530 = torch.constant.int 3200 | |
%3235 = torch.prim.ListConstruct %int1_3529, %240, %int3200_3530 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3236 = torch.aten.view %3234, %3235 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3236, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_3531 = torch.constant.int 1 | |
%int32_3532 = torch.constant.int 32 | |
%int100_3533 = torch.constant.int 100 | |
%3237 = torch.prim.ListConstruct %int1_3531, %240, %int32_3532, %int100_3533 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3238 = torch.aten.view %3224, %3237 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3238, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_3534 = torch.constant.int 1 | |
%int32_3535 = torch.constant.int 32 | |
%int100_3536 = torch.constant.int 100 | |
%3239 = torch.prim.ListConstruct %int1_3534, %240, %int32_3535, %int100_3536 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3240 = torch.aten.view %3230, %3239 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3240, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_3537 = torch.constant.int 1 | |
%int32_3538 = torch.constant.int 32 | |
%int100_3539 = torch.constant.int 100 | |
%3241 = torch.prim.ListConstruct %int1_3537, %240, %int32_3538, %int100_3539 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3242 = torch.aten.view %3236, %3241 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3242, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
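// The RoPE table is rebuilt from the same constants (it is emitted once per
// use and has not yet been hoisted or CSE'd at this stage of compilation),
// then applied first to Q (%3238 -> %3271) and, after a second identical
// rebuild, to K (%3240 -> %3300), exactly as above.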
%int2048_3540 = torch.constant.int 2048 | |
%none_3541 = torch.constant.none | |
%none_3542 = torch.constant.none | |
%cpu_3543 = torch.constant.device "cpu" | |
%false_3544 = torch.constant.bool false | |
%3243 = torch.aten.arange %int2048_3540, %none_3541, %none_3542, %cpu_3543, %false_3544 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_3545 = torch.constant.int 0 | |
%int100_3546 = torch.constant.int 100 | |
%int2_3547 = torch.constant.int 2 | |
%none_3548 = torch.constant.none | |
%none_3549 = torch.constant.none | |
%cpu_3550 = torch.constant.device "cpu" | |
%false_3551 = torch.constant.bool false | |
%3244 = torch.aten.arange.start_step %int0_3545, %int100_3546, %int2_3547, %none_3548, %none_3549, %cpu_3550, %false_3551 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_3552 = torch.constant.int 0 | |
%int0_3553 = torch.constant.int 0 | |
%int50_3554 = torch.constant.int 50 | |
%int1_3555 = torch.constant.int 1 | |
%3245 = torch.aten.slice.Tensor %3244, %int0_3552, %int0_3553, %int50_3554, %int1_3555 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_3556 = torch.constant.int 6 | |
%3246 = torch.prims.convert_element_type %3245, %int6_3556 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_3557 = torch.constant.int 100 | |
%3247 = torch.aten.div.Scalar %3246, %int100_3557 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_3558 = torch.constant.float 1.000000e+04 | |
%3248 = torch.aten.pow.Scalar %float1.000000e04_3558, %3247 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%3249 = torch.aten.reciprocal %3248 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_3559 = torch.constant.float 1.000000e+00 | |
%3250 = torch.aten.mul.Scalar %3249, %float1.000000e00_3559 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_3560 = torch.constant.int 2048 | |
%int1_3561 = torch.constant.int 1 | |
%3251 = torch.prim.ListConstruct %int2048_3560, %int1_3561 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3252 = torch.aten.view %3243, %3251 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%3253 = torch.aten.mul.Tensor %3252, %3250 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3254 = torch.aten.cos %3253 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3255 = torch.aten.sin %3253 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3256 = torch.aten.complex %3254, %3255 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_3562 = torch.constant.int 1 | |
%3257 = torch.aten.size.int %3224, %int1_3562 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_3563 = torch.constant.int 0 | |
%3258 = torch.aten.add.int %int0_3563, %3257 : !torch.int, !torch.int -> !torch.int | |
%int0_3564 = torch.constant.int 0 | |
%int0_3565 = torch.constant.int 0 | |
%int1_3566 = torch.constant.int 1 | |
%3259 = torch.aten.slice.Tensor %3256, %int0_3564, %int0_3565, %3258, %int1_3566 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3259, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_3567 = torch.constant.int 1 | |
%int0_3568 = torch.constant.int 0 | |
%int9223372036854775807_3569 = torch.constant.int 9223372036854775807 | |
%int1_3570 = torch.constant.int 1 | |
%3260 = torch.aten.slice.Tensor %3259, %int1_3567, %int0_3568, %int9223372036854775807_3569, %int1_3570 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3260, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_3571 = torch.constant.int 0 | |
%3261 = torch.aten.unsqueeze %3260, %int0_3571 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %3261, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_3572 = torch.constant.int 2 | |
%3262 = torch.aten.unsqueeze %3261, %int2_3572 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3262, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_3573 = torch.constant.int 3 | |
%int0_3574 = torch.constant.int 0 | |
%int9223372036854775807_3575 = torch.constant.int 9223372036854775807 | |
%int1_3576 = torch.constant.int 1 | |
%3263 = torch.aten.slice.Tensor %3262, %int3_3573, %int0_3574, %int9223372036854775807_3575, %int1_3576 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3263, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%3264 = torch_c.to_builtin_tensor %3238 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_3577 = arith.constant 1 : index | |
%dim_3578 = tensor.dim %3264, %c1_3577 : tensor<1x?x32x100xf16> | |
%3265 = flow.tensor.bitcast %3264 : tensor<1x?x32x100xf16>{%dim_3578} -> tensor<1x?x32x50xcomplex<f16>>{%dim_3578} | |
%3266 = torch_c.from_builtin_tensor %3265 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %3266, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%3267 = torch.aten.mul.Tensor %3266, %3263 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %3267, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%3268 = torch_c.to_builtin_tensor %3267 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_3579 = arith.constant 1 : index | |
%dim_3580 = tensor.dim %3268, %c1_3579 : tensor<1x?x32x50xcomplex<f32>> | |
%3269 = flow.tensor.bitcast %3268 : tensor<1x?x32x50xcomplex<f32>>{%dim_3580} -> tensor<1x?x32x100xf32>{%dim_3580} | |
%3270 = torch_c.from_builtin_tensor %3269 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %3270, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_3581 = torch.constant.int 5 | |
%3271 = torch.prims.convert_element_type %3270, %int5_3581 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3271, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int2048_3582 = torch.constant.int 2048 | |
%none_3583 = torch.constant.none | |
%none_3584 = torch.constant.none | |
%cpu_3585 = torch.constant.device "cpu" | |
%false_3586 = torch.constant.bool false | |
%3272 = torch.aten.arange %int2048_3582, %none_3583, %none_3584, %cpu_3585, %false_3586 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_3587 = torch.constant.int 0 | |
%int100_3588 = torch.constant.int 100 | |
%int2_3589 = torch.constant.int 2 | |
%none_3590 = torch.constant.none | |
%none_3591 = torch.constant.none | |
%cpu_3592 = torch.constant.device "cpu" | |
%false_3593 = torch.constant.bool false | |
%3273 = torch.aten.arange.start_step %int0_3587, %int100_3588, %int2_3589, %none_3590, %none_3591, %cpu_3592, %false_3593 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_3594 = torch.constant.int 0 | |
%int0_3595 = torch.constant.int 0 | |
%int50_3596 = torch.constant.int 50 | |
%int1_3597 = torch.constant.int 1 | |
%3274 = torch.aten.slice.Tensor %3273, %int0_3594, %int0_3595, %int50_3596, %int1_3597 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_3598 = torch.constant.int 6 | |
%3275 = torch.prims.convert_element_type %3274, %int6_3598 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_3599 = torch.constant.int 100 | |
%3276 = torch.aten.div.Scalar %3275, %int100_3599 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_3600 = torch.constant.float 1.000000e+04 | |
%3277 = torch.aten.pow.Scalar %float1.000000e04_3600, %3276 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%3278 = torch.aten.reciprocal %3277 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_3601 = torch.constant.float 1.000000e+00 | |
%3279 = torch.aten.mul.Scalar %3278, %float1.000000e00_3601 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_3602 = torch.constant.int 2048 | |
%int1_3603 = torch.constant.int 1 | |
%3280 = torch.prim.ListConstruct %int2048_3602, %int1_3603 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3281 = torch.aten.view %3272, %3280 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%3282 = torch.aten.mul.Tensor %3281, %3279 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3283 = torch.aten.cos %3282 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3284 = torch.aten.sin %3282 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3285 = torch.aten.complex %3283, %3284 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_3604 = torch.constant.int 1 | |
%3286 = torch.aten.size.int %3230, %int1_3604 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_3605 = torch.constant.int 0 | |
%3287 = torch.aten.add.int %int0_3605, %3286 : !torch.int, !torch.int -> !torch.int | |
%int0_3606 = torch.constant.int 0 | |
%int0_3607 = torch.constant.int 0 | |
%int1_3608 = torch.constant.int 1 | |
%3288 = torch.aten.slice.Tensor %3285, %int0_3606, %int0_3607, %3287, %int1_3608 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3288, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_3609 = torch.constant.int 1 | |
%int0_3610 = torch.constant.int 0 | |
%int9223372036854775807_3611 = torch.constant.int 9223372036854775807 | |
%int1_3612 = torch.constant.int 1 | |
%3289 = torch.aten.slice.Tensor %3288, %int1_3609, %int0_3610, %int9223372036854775807_3611, %int1_3612 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3289, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_3613 = torch.constant.int 0 | |
%3290 = torch.aten.unsqueeze %3289, %int0_3613 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %3290, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_3614 = torch.constant.int 2 | |
%3291 = torch.aten.unsqueeze %3290, %int2_3614 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3291, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_3615 = torch.constant.int 3 | |
%int0_3616 = torch.constant.int 0 | |
%int9223372036854775807_3617 = torch.constant.int 9223372036854775807 | |
%int1_3618 = torch.constant.int 1 | |
%3292 = torch.aten.slice.Tensor %3291, %int3_3615, %int0_3616, %int9223372036854775807_3617, %int1_3618 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3292, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%3293 = torch_c.to_builtin_tensor %3240 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_3619 = arith.constant 1 : index | |
%dim_3620 = tensor.dim %3293, %c1_3619 : tensor<1x?x32x100xf16> | |
%3294 = flow.tensor.bitcast %3293 : tensor<1x?x32x100xf16>{%dim_3620} -> tensor<1x?x32x50xcomplex<f16>>{%dim_3620} | |
%3295 = torch_c.from_builtin_tensor %3294 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %3295, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%3296 = torch.aten.mul.Tensor %3295, %3292 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %3296, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%3297 = torch_c.to_builtin_tensor %3296 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_3621 = arith.constant 1 : index | |
%dim_3622 = tensor.dim %3297, %c1_3621 : tensor<1x?x32x50xcomplex<f32>> | |
%3298 = flow.tensor.bitcast %3297 : tensor<1x?x32x50xcomplex<f32>>{%dim_3622} -> tensor<1x?x32x100xf32>{%dim_3622} | |
%3299 = torch_c.from_builtin_tensor %3298 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %3299, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_3623 = torch.constant.int 5 | |
%3300 = torch.prims.convert_element_type %3299, %int5_3623 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3300, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
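// KV-cache update for this block: same gather/scatter pattern as above, but
// reading the just-updated cache %3135 and writing at offset 30 (31 for V),
// i.e. the next layer's K/V slots in the 26 x 2 per-page layout.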
%int52_3624 = torch.constant.int 52 | |
%3301 = torch.aten.mul.Scalar %arg2, %int52_3624 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %3301, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int30 = torch.constant.int 30 | |
%int1_3625 = torch.constant.int 1 | |
%3302 = torch.aten.add.Scalar %3301, %int30, %int1_3625 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %3302, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int1_3626 = torch.constant.int 1 | |
%int16_3627 = torch.constant.int 16 | |
%int32_3628 = torch.constant.int 32 | |
%int100_3629 = torch.constant.int 100 | |
%3303 = torch.prim.ListConstruct %int1_3626, %368, %int16_3627, %int32_3628, %int100_3629 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3304 = torch.aten.view %3300, %3303 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %3304, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_3630 = torch.constant.int 16 | |
%int32_3631 = torch.constant.int 32 | |
%int100_3632 = torch.constant.int 100 | |
%3305 = torch.prim.ListConstruct %368, %int16_3630, %int32_3631, %int100_3632 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3306 = torch.aten.view %3304, %3305 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3306, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%3307 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%3308 = torch.aten.view %3302, %3307 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %3308, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_3633 = torch.constant.int 1 | |
%int16_3634 = torch.constant.int 16 | |
%int32_3635 = torch.constant.int 32 | |
%int100_3636 = torch.constant.int 100 | |
%3309 = torch.prim.ListConstruct %int1_3633, %368, %int16_3634, %int32_3635, %int100_3636 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3310 = torch.aten.view %3242, %3309 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %3310, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_3637 = torch.constant.int 16 | |
%int32_3638 = torch.constant.int 32 | |
%int100_3639 = torch.constant.int 100 | |
%3311 = torch.prim.ListConstruct %368, %int16_3637, %int32_3638, %int100_3639 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3312 = torch.aten.view %3310, %3311 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3312, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_3640 = torch.constant.int 1 | |
%int1_3641 = torch.constant.int 1 | |
%3313 = torch.aten.add.Scalar %3302, %int1_3640, %int1_3641 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %3313, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%3314 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%3315 = torch.aten.view %3313, %3314 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %3315, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%3316 = torch.prim.ListConstruct %3308, %3315 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_3642 = torch.constant.int 0 | |
%3317 = torch.aten.cat %3316, %int0_3642 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %3317, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%3318 = torch.prim.ListConstruct %3306, %3312 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_3643 = torch.constant.int 0 | |
%3319 = torch.aten.cat %3318, %int0_3643 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3319, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
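// KV-cache write: view the flat cache (%3135, [?, 2662400]) as [?, 26, 2, 16, 32, 100] (26 blocks, K and V per block), flatten to pages, index_put the new entries, and view back to flat. | |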
%int26_3644 = torch.constant.int 26 | |
%int2_3645 = torch.constant.int 2 | |
%int16_3646 = torch.constant.int 16 | |
%int32_3647 = torch.constant.int 32 | |
%int100_3648 = torch.constant.int 100 | |
%3320 = torch.prim.ListConstruct %359, %int26_3644, %int2_3645, %int16_3646, %int32_3647, %int100_3648 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3321 = torch.aten.view %3135, %3320 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %3321, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_3649 = torch.constant.int 26 | |
%3322 = torch.aten.mul.int %359, %int26_3649 : !torch.int, !torch.int -> !torch.int | |
%int2_3650 = torch.constant.int 2 | |
%3323 = torch.aten.mul.int %3322, %int2_3650 : !torch.int, !torch.int -> !torch.int | |
%int16_3651 = torch.constant.int 16 | |
%int32_3652 = torch.constant.int 32 | |
%int100_3653 = torch.constant.int 100 | |
%3324 = torch.prim.ListConstruct %3323, %int16_3651, %int32_3652, %int100_3653 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3325 = torch.aten.view %3321, %3324 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3325, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%3326 = torch.prim.ListConstruct %3317 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_3654 = torch.constant.bool false | |
%3327 = torch.aten.index_put %3325, %3326, %3319, %false_3654 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3327, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_3655 = torch.constant.int 26 | |
%int2_3656 = torch.constant.int 2 | |
%int16_3657 = torch.constant.int 16 | |
%int32_3658 = torch.constant.int 32 | |
%int100_3659 = torch.constant.int 100 | |
%3328 = torch.prim.ListConstruct %359, %int26_3655, %int2_3656, %int16_3657, %int32_3658, %int100_3659 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3329 = torch.aten.view %3327, %3328 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %3329, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_3660 = torch.constant.int 2662400 | |
%3330 = torch.prim.ListConstruct %359, %int2662400_3660 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3331 = torch.aten.view %3329, %3330 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %3331, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
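// Attention setup: transpose Q (%3271), rotated K (%3300), and V (%3242) from [1, seq, 32, 100] to [1, 32, seq, 100]; K is then transposed again to [1, 32, 100, seq]. | |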
%int1_3661 = torch.constant.int 1 | |
%int2_3662 = torch.constant.int 2 | |
%3332 = torch.aten.transpose.int %3271, %int1_3661, %int2_3662 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3332, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_3663 = torch.constant.int 1 | |
%int2_3664 = torch.constant.int 2 | |
%3333 = torch.aten.transpose.int %3300, %int1_3663, %int2_3664 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3333, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_3665 = torch.constant.int 1 | |
%int2_3666 = torch.constant.int 2 | |
%3334 = torch.aten.transpose.int %3242, %int1_3665, %int2_3666 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3334, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_3667 = torch.constant.int 2 | |
%int3_3668 = torch.constant.int 3 | |
%3335 = torch.aten.transpose.int %3333, %int2_3667, %int3_3668 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %3335, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
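// Scores: bmm(Q, K^T), divided by 10.0 (sqrt of head dim 100), plus the attention mask (%266); softmax in f32, cast back to f16. | |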
%int1_3669 = torch.constant.int 1 | |
%int32_3670 = torch.constant.int 32 | |
%int100_3671 = torch.constant.int 100 | |
%3336 = torch.prim.ListConstruct %int1_3669, %int32_3670, %3257, %int100_3671 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3672 = torch.constant.bool false | |
%3337 = torch.aten.expand %3332, %3336, %false_3672 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3337, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_3673 = torch.constant.int 32 | |
%int100_3674 = torch.constant.int 100 | |
%3338 = torch.prim.ListConstruct %int32_3673, %3257, %int100_3674 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3339 = torch.aten.view %3337, %3338 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %3339, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_3675 = torch.constant.int 1 | |
%int32_3676 = torch.constant.int 32 | |
%int100_3677 = torch.constant.int 100 | |
%3340 = torch.prim.ListConstruct %int1_3675, %int32_3676, %int100_3677, %3286 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3678 = torch.constant.bool false | |
%3341 = torch.aten.expand %3335, %3340, %false_3678 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %3341, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_3679 = torch.constant.int 32 | |
%int100_3680 = torch.constant.int 100 | |
%3342 = torch.prim.ListConstruct %int32_3679, %int100_3680, %3286 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3343 = torch.aten.view %3341, %3342 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %3343, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%3344 = torch.aten.bmm %3339, %3343 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %3344, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_3681 = torch.constant.int 1 | |
%int32_3682 = torch.constant.int 32 | |
%3345 = torch.prim.ListConstruct %int1_3681, %int32_3682, %3257, %3286 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3346 = torch.aten.view %3344, %3345 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3346, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%float1.000000e01_3683 = torch.constant.float 1.000000e+01 | |
%3347 = torch.aten.div.Scalar %3346, %float1.000000e01_3683 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3347, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_3684 = torch.constant.int 1 | |
%3348 = torch.aten.add.Tensor %3347, %266, %int1_3684 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3348, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int6_3685 = torch.constant.int 6 | |
%3349 = torch.prims.convert_element_type %3348, %int6_3685 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %3349, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int-1_3686 = torch.constant.int -1 | |
%false_3687 = torch.constant.bool false | |
%3350 = torch.aten._softmax %3349, %int-1_3686, %false_3687 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32> | |
torch.bind_symbolic_shape %3350, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32> | |
%int5_3688 = torch.constant.int 5 | |
%3351 = torch.prims.convert_element_type %3350, %int5_3688 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3351, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int1_3689 = torch.constant.int 1 | |
%int32_3690 = torch.constant.int 32 | |
%3352 = torch.prim.ListConstruct %int1_3689, %int32_3690, %3257, %3286 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3691 = torch.constant.bool false | |
%3353 = torch.aten.expand %3351, %3352, %false_3691 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3353, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%int32_3692 = torch.constant.int 32 | |
%3354 = torch.prim.ListConstruct %int32_3692, %3257, %3286 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3355 = torch.aten.view %3353, %3354 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %3355, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
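// Context: bmm(softmax probs, V), transpose heads back, clone to contiguous, and flatten to [1, seq, 3200]. | |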
%int1_3693 = torch.constant.int 1 | |
%3356 = torch.aten.size.int %3236, %int1_3693 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int1_3694 = torch.constant.int 1 | |
%int32_3695 = torch.constant.int 32 | |
%int100_3696 = torch.constant.int 100 | |
%3357 = torch.prim.ListConstruct %int1_3694, %int32_3695, %3356, %int100_3696 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3697 = torch.constant.bool false | |
%3358 = torch.aten.expand %3334, %3357, %false_3697 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3358, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_3698 = torch.constant.int 32 | |
%int100_3699 = torch.constant.int 100 | |
%3359 = torch.prim.ListConstruct %int32_3698, %3356, %int100_3699 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3360 = torch.aten.view %3358, %3359 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %3360, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%3361 = torch.aten.bmm %3355, %3360 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %3361, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_3700 = torch.constant.int 1 | |
%int32_3701 = torch.constant.int 32 | |
%int100_3702 = torch.constant.int 100 | |
%3362 = torch.prim.ListConstruct %int1_3700, %int32_3701, %3257, %int100_3702 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3363 = torch.aten.view %3361, %3362 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3363, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_3703 = torch.constant.int 1 | |
%int2_3704 = torch.constant.int 2 | |
%3364 = torch.aten.transpose.int %3363, %int1_3703, %int2_3704 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3364, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int0_3705 = torch.constant.int 0 | |
%3365 = torch.aten.clone %3364, %int0_3705 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3365, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_3706 = torch.constant.int 1 | |
%int3200_3707 = torch.constant.int 3200 | |
%3366 = torch.prim.ListConstruct %int1_3706, %3257, %int3200_3707 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3367 = torch.aten._unsafe_view %3365, %3366 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3367, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
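// Attention output projection (weight %140) and residual add with the block input (%3209). | |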
%int-2_3708 = torch.constant.int -2 | |
%int-1_3709 = torch.constant.int -1 | |
%3368 = torch.aten.transpose.int %140, %int-2_3708, %int-1_3709 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3710 = torch.constant.int 3200 | |
%3369 = torch.prim.ListConstruct %3257, %int3200_3710 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3370 = torch.aten.view %3367, %3369 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3370, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3371 = torch.aten.mm %3370, %3368 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3371, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3711 = torch.constant.int 1 | |
%int3200_3712 = torch.constant.int 3200 | |
%3372 = torch.prim.ListConstruct %int1_3711, %3257, %int3200_3712 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3373 = torch.aten.view %3371, %3372 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3373, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_3713 = torch.constant.int 1 | |
%3374 = torch.aten.add.Tensor %3209, %3373, %int1_3713 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3374, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
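// RMSNorm (weight %141): square, mean over the last dim, add eps (~1e-6), rsqrt, scale; computed in f32 and cast back to f16. | |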
%int6_3714 = torch.constant.int 6 | |
%3375 = torch.prims.convert_element_type %3374, %int6_3714 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3375, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_3715 = torch.constant.int 2 | |
%3376 = torch.aten.pow.Tensor_Scalar %3375, %int2_3715 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3376, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_3716 = torch.constant.int -1 | |
%3377 = torch.prim.ListConstruct %int-1_3716 : (!torch.int) -> !torch.list<int> | |
%true_3717 = torch.constant.bool true | |
%none_3718 = torch.constant.none | |
%3378 = torch.aten.mean.dim %3376, %3377, %true_3717, %none_3718 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3378, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_3719 = torch.constant.float 9.9999999747524271E-7 | |
%int1_3720 = torch.constant.int 1 | |
%3379 = torch.aten.add.Scalar %3378, %float9.999990e-07_3719, %int1_3720 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3379, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%3380 = torch.aten.rsqrt %3379 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3380, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%3381 = torch.aten.mul.Tensor %3375, %3380 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3381, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%3382 = torch.aten.mul.Tensor %141, %3381 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3382, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_3721 = torch.constant.int 5 | |
%3383 = torch.prims.convert_element_type %3382, %int5_3721 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3383, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
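// SwiGLU MLP: silu(x @ W_gate^T (%142)) * (x @ W_up^T (%143)), both producing [*, 8640]. | |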
%int-2_3722 = torch.constant.int -2 | |
%int-1_3723 = torch.constant.int -1 | |
%3384 = torch.aten.transpose.int %142, %int-2_3722, %int-1_3723 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_3724 = torch.constant.int 3200 | |
%3385 = torch.prim.ListConstruct %240, %int3200_3724 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3386 = torch.aten.view %3383, %3385 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3386, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3387 = torch.aten.mm %3386, %3384 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %3387, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_3725 = torch.constant.int 1 | |
%int8640_3726 = torch.constant.int 8640 | |
%3388 = torch.prim.ListConstruct %int1_3725, %240, %int8640_3726 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3389 = torch.aten.view %3387, %3388 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %3389, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%3390 = torch.aten.silu %3389 : !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %3390, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%int-2_3727 = torch.constant.int -2 | |
%int-1_3728 = torch.constant.int -1 | |
%3391 = torch.aten.transpose.int %143, %int-2_3727, %int-1_3728 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16> | |
%int3200_3729 = torch.constant.int 3200 | |
%3392 = torch.prim.ListConstruct %240, %int3200_3729 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3393 = torch.aten.view %3383, %3392 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3393, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3394 = torch.aten.mm %3393, %3391 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %3394, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%int1_3730 = torch.constant.int 1 | |
%int8640_3731 = torch.constant.int 8640 | |
%3395 = torch.prim.ListConstruct %int1_3730, %240, %int8640_3731 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3396 = torch.aten.view %3394, %3395 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %3396, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
%3397 = torch.aten.mul.Tensor %3390, %3396 : !torch.vtensor<[1,?,8640],f16>, !torch.vtensor<[1,?,8640],f16> -> !torch.vtensor<[1,?,8640],f16> | |
torch.bind_symbolic_shape %3397, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16> | |
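// Down-projection back to 3200 (weight %144) and the second residual add. | |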
%int-2_3732 = torch.constant.int -2 | |
%int-1_3733 = torch.constant.int -1 | |
%3398 = torch.aten.transpose.int %144, %int-2_3732, %int-1_3733 : !torch.vtensor<[3200,8640],f16>, !torch.int, !torch.int -> !torch.vtensor<[8640,3200],f16> | |
%int1_3734 = torch.constant.int 1 | |
%3399 = torch.aten.size.int %3389, %int1_3734 : !torch.vtensor<[1,?,8640],f16>, !torch.int -> !torch.int | |
%int8640_3735 = torch.constant.int 8640 | |
%3400 = torch.prim.ListConstruct %3399, %int8640_3735 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3401 = torch.aten.view %3397, %3400 : !torch.vtensor<[1,?,8640],f16>, !torch.list<int> -> !torch.vtensor<[?,8640],f16> | |
torch.bind_symbolic_shape %3401, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16> | |
%3402 = torch.aten.mm %3401, %3398 : !torch.vtensor<[?,8640],f16>, !torch.vtensor<[8640,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3402, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3736 = torch.constant.int 1 | |
%int3200_3737 = torch.constant.int 3200 | |
%3403 = torch.prim.ListConstruct %int1_3736, %3399, %int3200_3737 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3404 = torch.aten.view %3402, %3403 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3404, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_3738 = torch.constant.int 1 | |
%3405 = torch.aten.add.Tensor %3374, %3404, %int1_3738 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3405, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
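// The next decoder block appears to begin here: RMSNorm of the running hidden state (weight %145). | |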
%int6_3739 = torch.constant.int 6 | |
%3406 = torch.prims.convert_element_type %3405, %int6_3739 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3406, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int2_3740 = torch.constant.int 2 | |
%3407 = torch.aten.pow.Tensor_Scalar %3406, %int2_3740 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3407, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int-1_3741 = torch.constant.int -1 | |
%3408 = torch.prim.ListConstruct %int-1_3741 : (!torch.int) -> !torch.list<int> | |
%true_3742 = torch.constant.bool true | |
%none_3743 = torch.constant.none | |
%3409 = torch.aten.mean.dim %3407, %3408, %true_3742, %none_3743 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3409, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%float9.999990e-07_3744 = torch.constant.float 9.9999999747524271E-7 | |
%int1_3745 = torch.constant.int 1 | |
%3410 = torch.aten.add.Scalar %3409, %float9.999990e-07_3744, %int1_3745 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3410, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%3411 = torch.aten.rsqrt %3410 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32> | |
torch.bind_symbolic_shape %3411, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32> | |
%3412 = torch.aten.mul.Tensor %3406, %3411 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3412, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%3413 = torch.aten.mul.Tensor %145, %3412 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32> | |
torch.bind_symbolic_shape %3413, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32> | |
%int5_3746 = torch.constant.int 5 | |
%3414 = torch.prims.convert_element_type %3413, %int5_3746 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3414, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
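// Q/K/V projections for this block (weights %146, %147, %148), each viewed as [1, seq, 32, 100]. | |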
%int-2_3747 = torch.constant.int -2 | |
%int-1_3748 = torch.constant.int -1 | |
%3415 = torch.aten.transpose.int %146, %int-2_3747, %int-1_3748 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3749 = torch.constant.int 3200 | |
%3416 = torch.prim.ListConstruct %240, %int3200_3749 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3417 = torch.aten.view %3414, %3416 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3417, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3418 = torch.aten.mm %3417, %3415 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3418, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3750 = torch.constant.int 1 | |
%int3200_3751 = torch.constant.int 3200 | |
%3419 = torch.prim.ListConstruct %int1_3750, %240, %int3200_3751 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3420 = torch.aten.view %3418, %3419 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3420, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_3752 = torch.constant.int -2 | |
%int-1_3753 = torch.constant.int -1 | |
%3421 = torch.aten.transpose.int %147, %int-2_3752, %int-1_3753 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3754 = torch.constant.int 3200 | |
%3422 = torch.prim.ListConstruct %240, %int3200_3754 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3423 = torch.aten.view %3414, %3422 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3423, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3424 = torch.aten.mm %3423, %3421 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3424, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3755 = torch.constant.int 1 | |
%int3200_3756 = torch.constant.int 3200 | |
%3425 = torch.prim.ListConstruct %int1_3755, %240, %int3200_3756 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3426 = torch.aten.view %3424, %3425 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3426, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int-2_3757 = torch.constant.int -2 | |
%int-1_3758 = torch.constant.int -1 | |
%3427 = torch.aten.transpose.int %148, %int-2_3757, %int-1_3758 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16> | |
%int3200_3759 = torch.constant.int 3200 | |
%3428 = torch.prim.ListConstruct %240, %int3200_3759 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3429 = torch.aten.view %3414, %3428 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3429, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%3430 = torch.aten.mm %3429, %3427 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16> | |
torch.bind_symbolic_shape %3430, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16> | |
%int1_3760 = torch.constant.int 1 | |
%int3200_3761 = torch.constant.int 3200 | |
%3431 = torch.prim.ListConstruct %int1_3760, %240, %int3200_3761 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3432 = torch.aten.view %3430, %3431 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16> | |
torch.bind_symbolic_shape %3432, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16> | |
%int1_3762 = torch.constant.int 1 | |
%int32_3763 = torch.constant.int 32 | |
%int100_3764 = torch.constant.int 100 | |
%3433 = torch.prim.ListConstruct %int1_3762, %240, %int32_3763, %int100_3764 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3434 = torch.aten.view %3420, %3433 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3434, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_3765 = torch.constant.int 1 | |
%int32_3766 = torch.constant.int 32 | |
%int100_3767 = torch.constant.int 100 | |
%3435 = torch.prim.ListConstruct %int1_3765, %240, %int32_3766, %int100_3767 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3436 = torch.aten.view %3426, %3435 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3436, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
%int1_3768 = torch.constant.int 1 | |
%int32_3769 = torch.constant.int 32 | |
%int100_3770 = torch.constant.int 100 | |
%3437 = torch.prim.ListConstruct %int1_3768, %240, %int32_3769, %int100_3770 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3438 = torch.aten.view %3432, %3437 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3438, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
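// RoPE frequency table: positions arange(2048) times inverse frequencies 1 / 10000^(i/100) for i = 0, 2, ..., 98, stored as complex cos(theta) + i*sin(theta). | |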
%int2048_3771 = torch.constant.int 2048 | |
%none_3772 = torch.constant.none | |
%none_3773 = torch.constant.none | |
%cpu_3774 = torch.constant.device "cpu" | |
%false_3775 = torch.constant.bool false | |
%3439 = torch.aten.arange %int2048_3771, %none_3772, %none_3773, %cpu_3774, %false_3775 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_3776 = torch.constant.int 0 | |
%int100_3777 = torch.constant.int 100 | |
%int2_3778 = torch.constant.int 2 | |
%none_3779 = torch.constant.none | |
%none_3780 = torch.constant.none | |
%cpu_3781 = torch.constant.device "cpu" | |
%false_3782 = torch.constant.bool false | |
%3440 = torch.aten.arange.start_step %int0_3776, %int100_3777, %int2_3778, %none_3779, %none_3780, %cpu_3781, %false_3782 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_3783 = torch.constant.int 0 | |
%int0_3784 = torch.constant.int 0 | |
%int50_3785 = torch.constant.int 50 | |
%int1_3786 = torch.constant.int 1 | |
%3441 = torch.aten.slice.Tensor %3440, %int0_3783, %int0_3784, %int50_3785, %int1_3786 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_3787 = torch.constant.int 6 | |
%3442 = torch.prims.convert_element_type %3441, %int6_3787 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_3788 = torch.constant.int 100 | |
%3443 = torch.aten.div.Scalar %3442, %int100_3788 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_3789 = torch.constant.float 1.000000e+04 | |
%3444 = torch.aten.pow.Scalar %float1.000000e04_3789, %3443 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%3445 = torch.aten.reciprocal %3444 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_3790 = torch.constant.float 1.000000e+00 | |
%3446 = torch.aten.mul.Scalar %3445, %float1.000000e00_3790 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_3791 = torch.constant.int 2048 | |
%int1_3792 = torch.constant.int 1 | |
%3447 = torch.prim.ListConstruct %int2048_3791, %int1_3792 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3448 = torch.aten.view %3439, %3447 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%3449 = torch.aten.mul.Tensor %3448, %3446 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3450 = torch.aten.cos %3449 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3451 = torch.aten.sin %3449 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3452 = torch.aten.complex %3450, %3451 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
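// Apply RoPE to Q (%3434): bitcast adjacent f16 pairs to complex<f16>, multiply by the position-sliced table, bitcast back to f32 pairs, and cast to f16. | |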
%int1_3793 = torch.constant.int 1 | |
%3453 = torch.aten.size.int %3420, %int1_3793 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_3794 = torch.constant.int 0 | |
%3454 = torch.aten.add.int %int0_3794, %3453 : !torch.int, !torch.int -> !torch.int | |
%int0_3795 = torch.constant.int 0 | |
%int0_3796 = torch.constant.int 0 | |
%int1_3797 = torch.constant.int 1 | |
%3455 = torch.aten.slice.Tensor %3452, %int0_3795, %int0_3796, %3454, %int1_3797 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3455, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_3798 = torch.constant.int 1 | |
%int0_3799 = torch.constant.int 0 | |
%int9223372036854775807_3800 = torch.constant.int 9223372036854775807 | |
%int1_3801 = torch.constant.int 1 | |
%3456 = torch.aten.slice.Tensor %3455, %int1_3798, %int0_3799, %int9223372036854775807_3800, %int1_3801 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3456, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_3802 = torch.constant.int 0 | |
%3457 = torch.aten.unsqueeze %3456, %int0_3802 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %3457, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_3803 = torch.constant.int 2 | |
%3458 = torch.aten.unsqueeze %3457, %int2_3803 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3458, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_3804 = torch.constant.int 3 | |
%int0_3805 = torch.constant.int 0 | |
%int9223372036854775807_3806 = torch.constant.int 9223372036854775807 | |
%int1_3807 = torch.constant.int 1 | |
%3459 = torch.aten.slice.Tensor %3458, %int3_3804, %int0_3805, %int9223372036854775807_3806, %int1_3807 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3459, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%3460 = torch_c.to_builtin_tensor %3434 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_3808 = arith.constant 1 : index | |
%dim_3809 = tensor.dim %3460, %c1_3808 : tensor<1x?x32x100xf16> | |
%3461 = flow.tensor.bitcast %3460 : tensor<1x?x32x100xf16>{%dim_3809} -> tensor<1x?x32x50xcomplex<f16>>{%dim_3809} | |
%3462 = torch_c.from_builtin_tensor %3461 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %3462, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%3463 = torch.aten.mul.Tensor %3462, %3459 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %3463, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%3464 = torch_c.to_builtin_tensor %3463 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_3810 = arith.constant 1 : index | |
%dim_3811 = tensor.dim %3464, %c1_3810 : tensor<1x?x32x50xcomplex<f32>> | |
%3465 = flow.tensor.bitcast %3464 : tensor<1x?x32x50xcomplex<f32>>{%dim_3811} -> tensor<1x?x32x100xf32>{%dim_3811} | |
%3466 = torch_c.from_builtin_tensor %3465 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %3466, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_3812 = torch.constant.int 5 | |
%3467 = torch.prims.convert_element_type %3466, %int5_3812 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3467, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
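// The same table construction is repeated verbatim and applied to K (%3436) in the same way. | |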
%int2048_3813 = torch.constant.int 2048 | |
%none_3814 = torch.constant.none | |
%none_3815 = torch.constant.none | |
%cpu_3816 = torch.constant.device "cpu" | |
%false_3817 = torch.constant.bool false | |
%3468 = torch.aten.arange %int2048_3813, %none_3814, %none_3815, %cpu_3816, %false_3817 : !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[2048],si64> | |
%int0_3818 = torch.constant.int 0 | |
%int100_3819 = torch.constant.int 100 | |
%int2_3820 = torch.constant.int 2 | |
%none_3821 = torch.constant.none | |
%none_3822 = torch.constant.none | |
%cpu_3823 = torch.constant.device "cpu" | |
%false_3824 = torch.constant.bool false | |
%3469 = torch.aten.arange.start_step %int0_3818, %int100_3819, %int2_3820, %none_3821, %none_3822, %cpu_3823, %false_3824 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[50],si64> | |
%int0_3825 = torch.constant.int 0 | |
%int0_3826 = torch.constant.int 0 | |
%int50_3827 = torch.constant.int 50 | |
%int1_3828 = torch.constant.int 1 | |
%3470 = torch.aten.slice.Tensor %3469, %int0_3825, %int0_3826, %int50_3827, %int1_3828 : !torch.vtensor<[50],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[50],si64> | |
%int6_3829 = torch.constant.int 6 | |
%3471 = torch.prims.convert_element_type %3470, %int6_3829 : !torch.vtensor<[50],si64>, !torch.int -> !torch.vtensor<[50],f32> | |
%int100_3830 = torch.constant.int 100 | |
%3472 = torch.aten.div.Scalar %3471, %int100_3830 : !torch.vtensor<[50],f32>, !torch.int -> !torch.vtensor<[50],f32> | |
%float1.000000e04_3831 = torch.constant.float 1.000000e+04 | |
%3473 = torch.aten.pow.Scalar %float1.000000e04_3831, %3472 : !torch.float, !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%3474 = torch.aten.reciprocal %3473 : !torch.vtensor<[50],f32> -> !torch.vtensor<[50],f32> | |
%float1.000000e00_3832 = torch.constant.float 1.000000e+00 | |
%3475 = torch.aten.mul.Scalar %3474, %float1.000000e00_3832 : !torch.vtensor<[50],f32>, !torch.float -> !torch.vtensor<[50],f32> | |
%int2048_3833 = torch.constant.int 2048 | |
%int1_3834 = torch.constant.int 1 | |
%3476 = torch.prim.ListConstruct %int2048_3833, %int1_3834 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3477 = torch.aten.view %3468, %3476 : !torch.vtensor<[2048],si64>, !torch.list<int> -> !torch.vtensor<[2048,1],si64> | |
%3478 = torch.aten.mul.Tensor %3477, %3475 : !torch.vtensor<[2048,1],si64>, !torch.vtensor<[50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3479 = torch.aten.cos %3478 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3480 = torch.aten.sin %3478 : !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],f32> | |
%3481 = torch.aten.complex %3479, %3480 : !torch.vtensor<[2048,50],f32>, !torch.vtensor<[2048,50],f32> -> !torch.vtensor<[2048,50],complex<f32>> | |
%int1_3835 = torch.constant.int 1 | |
%3482 = torch.aten.size.int %3426, %int1_3835 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int | |
%int0_3836 = torch.constant.int 0 | |
%3483 = torch.aten.add.int %int0_3836, %3482 : !torch.int, !torch.int -> !torch.int | |
%int0_3837 = torch.constant.int 0 | |
%int0_3838 = torch.constant.int 0 | |
%int1_3839 = torch.constant.int 1 | |
%3484 = torch.aten.slice.Tensor %3481, %int0_3837, %int0_3838, %3483, %int1_3839 : !torch.vtensor<[2048,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3484, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int1_3840 = torch.constant.int 1 | |
%int0_3841 = torch.constant.int 0 | |
%int9223372036854775807_3842 = torch.constant.int 9223372036854775807 | |
%int1_3843 = torch.constant.int 1 | |
%3485 = torch.aten.slice.Tensor %3484, %int1_3840, %int0_3841, %int9223372036854775807_3842, %int1_3843 : !torch.vtensor<[?,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[?,50],complex<f32>> | |
torch.bind_symbolic_shape %3485, [%238], affine_map<()[s0] -> (s0 * 16, 50)> : !torch.vtensor<[?,50],complex<f32>> | |
%int0_3844 = torch.constant.int 0 | |
%3486 = torch.aten.unsqueeze %3485, %int0_3844 : !torch.vtensor<[?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,50],complex<f32>> | |
torch.bind_symbolic_shape %3486, [%238], affine_map<()[s0] -> (1, s0 * 16, 50)> : !torch.vtensor<[1,?,50],complex<f32>> | |
%int2_3845 = torch.constant.int 2 | |
%3487 = torch.aten.unsqueeze %3486, %int2_3845 : !torch.vtensor<[1,?,50],complex<f32>>, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3487, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%int3_3846 = torch.constant.int 3 | |
%int0_3847 = torch.constant.int 0 | |
%int9223372036854775807_3848 = torch.constant.int 9223372036854775807 | |
%int1_3849 = torch.constant.int 1 | |
%3488 = torch.aten.slice.Tensor %3487, %int3_3846, %int0_3847, %int9223372036854775807_3848, %int1_3849 : !torch.vtensor<[1,?,1,50],complex<f32>>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,?,1,50],complex<f32>> | |
torch.bind_symbolic_shape %3488, [%238], affine_map<()[s0] -> (1, s0 * 16, 1, 50)> : !torch.vtensor<[1,?,1,50],complex<f32>> | |
%3489 = torch_c.to_builtin_tensor %3436 : !torch.vtensor<[1,?,32,100],f16> -> tensor<1x?x32x100xf16> | |
%c1_3850 = arith.constant 1 : index | |
%dim_3851 = tensor.dim %3489, %c1_3850 : tensor<1x?x32x100xf16> | |
%3490 = flow.tensor.bitcast %3489 : tensor<1x?x32x100xf16>{%dim_3851} -> tensor<1x?x32x50xcomplex<f16>>{%dim_3851} | |
%3491 = torch_c.from_builtin_tensor %3490 : tensor<1x?x32x50xcomplex<f16>> -> !torch.vtensor<[1,?,32,50],complex<f16>> | |
torch.bind_symbolic_shape %3491, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f16>> | |
%3492 = torch.aten.mul.Tensor %3491, %3488 : !torch.vtensor<[1,?,32,50],complex<f16>>, !torch.vtensor<[1,?,1,50],complex<f32>> -> !torch.vtensor<[1,?,32,50],complex<f32>> | |
torch.bind_symbolic_shape %3492, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 50)> : !torch.vtensor<[1,?,32,50],complex<f32>> | |
%3493 = torch_c.to_builtin_tensor %3492 : !torch.vtensor<[1,?,32,50],complex<f32>> -> tensor<1x?x32x50xcomplex<f32>> | |
%c1_3852 = arith.constant 1 : index | |
%dim_3853 = tensor.dim %3493, %c1_3852 : tensor<1x?x32x50xcomplex<f32>> | |
%3494 = flow.tensor.bitcast %3493 : tensor<1x?x32x50xcomplex<f32>>{%dim_3853} -> tensor<1x?x32x100xf32>{%dim_3853} | |
%3495 = torch_c.from_builtin_tensor %3494 : tensor<1x?x32x100xf32> -> !torch.vtensor<[1,?,32,100],f32> | |
torch.bind_symbolic_shape %3495, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f32> | |
%int5_3854 = torch.constant.int 5 | |
%3496 = torch.prims.convert_element_type %3495, %int5_3854 : !torch.vtensor<[1,?,32,100],f32>, !torch.int -> !torch.vtensor<[1,?,32,100],f16> | |
torch.bind_symbolic_shape %3496, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16> | |
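// Cache page indices for this block: position (%arg2) * 52 + 32, where 52 = 26 blocks x 2 (K/V) entries; the previous block used offset 30. | |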
%int52_3855 = torch.constant.int 52 | |
%3497 = torch.aten.mul.Scalar %arg2, %int52_3855 : !torch.vtensor<[1,?],si64>, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %3497, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%int32_3856 = torch.constant.int 32 | |
%int1_3857 = torch.constant.int 1 | |
%3498 = torch.aten.add.Scalar %3497, %int32_3856, %int1_3857 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %3498, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
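// Reshape K/V into pages, build the key/value index vectors, and scatter into the flat cache, following the same pattern as the block above. | |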
%int1_3858 = torch.constant.int 1 | |
%int16_3859 = torch.constant.int 16 | |
%int32_3860 = torch.constant.int 32 | |
%int100_3861 = torch.constant.int 100 | |
%3499 = torch.prim.ListConstruct %int1_3858, %368, %int16_3859, %int32_3860, %int100_3861 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3500 = torch.aten.view %3496, %3499 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %3500, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_3862 = torch.constant.int 16 | |
%int32_3863 = torch.constant.int 32 | |
%int100_3864 = torch.constant.int 100 | |
%3501 = torch.prim.ListConstruct %368, %int16_3862, %int32_3863, %int100_3864 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3502 = torch.aten.view %3500, %3501 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3502, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%3503 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%3504 = torch.aten.view %3498, %3503 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %3504, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%int1_3865 = torch.constant.int 1 | |
%int16_3866 = torch.constant.int 16 | |
%int32_3867 = torch.constant.int 32 | |
%int100_3868 = torch.constant.int 100 | |
%3505 = torch.prim.ListConstruct %int1_3865, %368, %int16_3866, %int32_3867, %int100_3868 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3506 = torch.aten.view %3438, %3505 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,16,32,100],f16> | |
torch.bind_symbolic_shape %3506, [%238], affine_map<()[s0] -> (1, s0, 16, 32, 100)> : !torch.vtensor<[1,?,16,32,100],f16> | |
%int16_3869 = torch.constant.int 16 | |
%int32_3870 = torch.constant.int 32 | |
%int100_3871 = torch.constant.int 100 | |
%3507 = torch.prim.ListConstruct %368, %int16_3869, %int32_3870, %int100_3871 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3508 = torch.aten.view %3506, %3507 : !torch.vtensor<[1,?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3508, [%238], affine_map<()[s0] -> (s0, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int1_3872 = torch.constant.int 1 | |
%int1_3873 = torch.constant.int 1 | |
%3509 = torch.aten.add.Scalar %3498, %int1_3872, %int1_3873 : !torch.vtensor<[1,?],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,?],si64> | |
torch.bind_symbolic_shape %3509, [%238], affine_map<()[s0] -> (1, s0)> : !torch.vtensor<[1,?],si64> | |
%3510 = torch.prim.ListConstruct %368 : (!torch.int) -> !torch.list<int> | |
%3511 = torch.aten.view %3509, %3510 : !torch.vtensor<[1,?],si64>, !torch.list<int> -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %3511, [%238], affine_map<()[s0] -> (s0)> : !torch.vtensor<[?],si64> | |
%3512 = torch.prim.ListConstruct %3504, %3511 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
%int0_3874 = torch.constant.int 0 | |
%3513 = torch.aten.cat %3512, %int0_3874 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?],si64> | |
torch.bind_symbolic_shape %3513, [%238], affine_map<()[s0] -> (s0 * 2)> : !torch.vtensor<[?],si64> | |
%3514 = torch.prim.ListConstruct %3502, %3508 : (!torch.vtensor<[?,16,32,100],f16>, !torch.vtensor<[?,16,32,100],f16>) -> !torch.list<vtensor> | |
%int0_3875 = torch.constant.int 0 | |
%3515 = torch.aten.cat %3514, %int0_3875 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3515, [%238], affine_map<()[s0] -> (s0 * 2, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_3876 = torch.constant.int 26 | |
%int2_3877 = torch.constant.int 2 | |
%int16_3878 = torch.constant.int 16 | |
%int32_3879 = torch.constant.int 32 | |
%int100_3880 = torch.constant.int 100 | |
%3516 = torch.prim.ListConstruct %359, %int26_3876, %int2_3877, %int16_3878, %int32_3879, %int100_3880 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3517 = torch.aten.view %3331, %3516 : !torch.vtensor<[?,2662400],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %3517, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int26_3881 = torch.constant.int 26 | |
%3518 = torch.aten.mul.int %359, %int26_3881 : !torch.int, !torch.int -> !torch.int | |
%int2_3882 = torch.constant.int 2 | |
%3519 = torch.aten.mul.int %3518, %int2_3882 : !torch.int, !torch.int -> !torch.int | |
%int16_3883 = torch.constant.int 16 | |
%int32_3884 = torch.constant.int 32 | |
%int100_3885 = torch.constant.int 100 | |
%3520 = torch.prim.ListConstruct %3519, %int16_3883, %int32_3884, %int100_3885 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3521 = torch.aten.view %3517, %3520 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3521, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%3522 = torch.prim.ListConstruct %3513 : (!torch.vtensor<[?],si64>) -> !torch.list<optional<vtensor>> | |
%false_3886 = torch.constant.bool false | |
%3523 = torch.aten.index_put %3521, %3522, %3515, %false_3886 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<optional<vtensor>>, !torch.vtensor<[?,16,32,100],f16>, !torch.bool -> !torch.vtensor<[?,16,32,100],f16> | |
torch.bind_symbolic_shape %3523, [%239], affine_map<()[s0] -> (s0 * 52, 16, 32, 100)> : !torch.vtensor<[?,16,32,100],f16> | |
%int26_3887 = torch.constant.int 26 | |
%int2_3888 = torch.constant.int 2 | |
%int16_3889 = torch.constant.int 16 | |
%int32_3890 = torch.constant.int 32 | |
%int100_3891 = torch.constant.int 100 | |
%3524 = torch.prim.ListConstruct %359, %int26_3887, %int2_3888, %int16_3889, %int32_3890, %int100_3891 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3525 = torch.aten.view %3523, %3524 : !torch.vtensor<[?,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,26,2,16,32,100],f16> | |
torch.bind_symbolic_shape %3525, [%239], affine_map<()[s0] -> (s0, 26, 2, 16, 32, 100)> : !torch.vtensor<[?,26,2,16,32,100],f16> | |
%int2662400_3892 = torch.constant.int 2662400 | |
%3526 = torch.prim.ListConstruct %359, %int2662400_3892 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3527 = torch.aten.view %3525, %3526 : !torch.vtensor<[?,26,2,16,32,100],f16>, !torch.list<int> -> !torch.vtensor<[?,2662400],f16> | |
torch.bind_symbolic_shape %3527, [%239], affine_map<()[s0] -> (s0, 2662400)> : !torch.vtensor<[?,2662400],f16> | |
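// Attention for this block: transpose Q/K/V, scores = bmm(Q, K^T) / 10.0 + mask, f32 softmax, then the weighted sum with V. | |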
%int1_3893 = torch.constant.int 1 | |
%int2_3894 = torch.constant.int 2 | |
%3528 = torch.aten.transpose.int %3467, %int1_3893, %int2_3894 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3528, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_3895 = torch.constant.int 1 | |
%int2_3896 = torch.constant.int 2 | |
%3529 = torch.aten.transpose.int %3496, %int1_3895, %int2_3896 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3529, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int1_3897 = torch.constant.int 1 | |
%int2_3898 = torch.constant.int 2 | |
%3530 = torch.aten.transpose.int %3438, %int1_3897, %int2_3898 : !torch.vtensor<[1,?,32,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3530, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int2_3899 = torch.constant.int 2 | |
%int3_3900 = torch.constant.int 3 | |
%3531 = torch.aten.transpose.int %3529, %int2_3899, %int3_3900 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %3531, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int1_3901 = torch.constant.int 1 | |
%int32_3902 = torch.constant.int 32 | |
%int100_3903 = torch.constant.int 100 | |
%3532 = torch.prim.ListConstruct %int1_3901, %int32_3902, %3453, %int100_3903 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3904 = torch.constant.bool false | |
%3533 = torch.aten.expand %3528, %3532, %false_3904 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16> | |
torch.bind_symbolic_shape %3533, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16> | |
%int32_3905 = torch.constant.int 32 | |
%int100_3906 = torch.constant.int 100 | |
%3534 = torch.prim.ListConstruct %int32_3905, %3453, %int100_3906 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3535 = torch.aten.view %3533, %3534 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16> | |
torch.bind_symbolic_shape %3535, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16> | |
%int1_3907 = torch.constant.int 1 | |
%int32_3908 = torch.constant.int 32 | |
%int100_3909 = torch.constant.int 100 | |
%3536 = torch.prim.ListConstruct %int1_3907, %int32_3908, %int100_3909, %3482 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%false_3910 = torch.constant.bool false | |
%3537 = torch.aten.expand %3531, %3536, %false_3910 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,100,?],f16> | |
torch.bind_symbolic_shape %3537, [%238], affine_map<()[s0] -> (1, 32, 100, s0 * 16)> : !torch.vtensor<[1,32,100,?],f16> | |
%int32_3911 = torch.constant.int 32 | |
%int100_3912 = torch.constant.int 100 | |
%3538 = torch.prim.ListConstruct %int32_3911, %int100_3912, %3482 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3539 = torch.aten.view %3537, %3538 : !torch.vtensor<[1,32,100,?],f16>, !torch.list<int> -> !torch.vtensor<[32,100,?],f16> | |
torch.bind_symbolic_shape %3539, [%238], affine_map<()[s0] -> (32, 100, s0 * 16)> : !torch.vtensor<[32,100,?],f16> | |
%3540 = torch.aten.bmm %3535, %3539 : !torch.vtensor<[32,?,100],f16>, !torch.vtensor<[32,100,?],f16> -> !torch.vtensor<[32,?,?],f16> | |
torch.bind_symbolic_shape %3540, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16> | |
%int1_3913 = torch.constant.int 1 | |
%int32_3914 = torch.constant.int 32 | |
%3541 = torch.prim.ListConstruct %int1_3913, %int32_3914, %3453, %3482 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%3542 = torch.aten.view %3540, %3541 : !torch.vtensor<[32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,?],f16> | |
torch.bind_symbolic_shape %3542, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16> | |
%float1.000000e01_3915 = torch.constant.float 1.000000e+01
%3543 = torch.aten.div.Scalar %3542, %float1.000000e01_3915 : !torch.vtensor<[1,32,?,?],f16>, !torch.float -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %3543, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
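// Add the broadcast attention mask %266 ([1, 1, s, s]) to the scaled scores.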
%int1_3916 = torch.constant.int 1
%3544 = torch.aten.add.Tensor %3543, %266, %int1_3916 : !torch.vtensor<[1,32,?,?],f16>, !torch.vtensor<[1,1,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %3544, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
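// Softmax is evaluated in f32 (dtype code 6) for numerical stability and the result
// is cast back to f16 (dtype code 5).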
%int6_3917 = torch.constant.int 6
%3545 = torch.prims.convert_element_type %3544, %int6_3917 : !torch.vtensor<[1,32,?,?],f16>, !torch.int -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %3545, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int-1_3918 = torch.constant.int -1
%false_3919 = torch.constant.bool false
%3546 = torch.aten._softmax %3545, %int-1_3918, %false_3919 : !torch.vtensor<[1,32,?,?],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,32,?,?],f32>
torch.bind_symbolic_shape %3546, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f32>
%int5_3920 = torch.constant.int 5
%3547 = torch.prims.convert_element_type %3546, %int5_3920 : !torch.vtensor<[1,32,?,?],f32>, !torch.int -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %3547, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
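// Flatten the attention probabilities and the value tensor %3530 to 3-D for the
// second batched matmul (attention-weighted sum of values).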
%int1_3921 = torch.constant.int 1
%int32_3922 = torch.constant.int 32
%3548 = torch.prim.ListConstruct %int1_3921, %int32_3922, %3453, %3482 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_3923 = torch.constant.bool false
%3549 = torch.aten.expand %3547, %3548, %false_3923 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,?],f16>
torch.bind_symbolic_shape %3549, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, s0 * 16)> : !torch.vtensor<[1,32,?,?],f16>
%int32_3924 = torch.constant.int 32
%3550 = torch.prim.ListConstruct %int32_3924, %3453, %3482 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3551 = torch.aten.view %3549, %3550 : !torch.vtensor<[1,32,?,?],f16>, !torch.list<int> -> !torch.vtensor<[32,?,?],f16>
torch.bind_symbolic_shape %3551, [%238], affine_map<()[s0] -> (32, s0 * 16, s0 * 16)> : !torch.vtensor<[32,?,?],f16>
%int1_3925 = torch.constant.int 1
%3552 = torch.aten.size.int %3432, %int1_3925 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.int
%int1_3926 = torch.constant.int 1
%int32_3927 = torch.constant.int 32
%int100_3928 = torch.constant.int 100
%3553 = torch.prim.ListConstruct %int1_3926, %int32_3927, %3552, %int100_3928 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%false_3929 = torch.constant.bool false
%3554 = torch.aten.expand %3530, %3553, %false_3929 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %3554, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
%int32_3930 = torch.constant.int 32
%int100_3931 = torch.constant.int 100
%3555 = torch.prim.ListConstruct %int32_3930, %3552, %int100_3931 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3556 = torch.aten.view %3554, %3555 : !torch.vtensor<[1,32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %3556, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
%3557 = torch.aten.bmm %3551, %3556 : !torch.vtensor<[32,?,?],f16>, !torch.vtensor<[32,?,100],f16> -> !torch.vtensor<[32,?,100],f16>
torch.bind_symbolic_shape %3557, [%238], affine_map<()[s0] -> (32, s0 * 16, 100)> : !torch.vtensor<[32,?,100],f16>
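// View the per-head context [32, s, 100] back to [1, 32, s, 100].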
%int1_3932 = torch.constant.int 1
%int32_3933 = torch.constant.int 32
%int100_3934 = torch.constant.int 100
%3558 = torch.prim.ListConstruct %int1_3932, %int32_3933, %3453, %int100_3934 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3559 = torch.aten.view %3557, %3558 : !torch.vtensor<[32,?,100],f16>, !torch.list<int> -> !torch.vtensor<[1,32,?,100],f16>
torch.bind_symbolic_shape %3559, [%238], affine_map<()[s0] -> (1, 32, s0 * 16, 100)> : !torch.vtensor<[1,32,?,100],f16>
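// Transpose to [1, s, 32, 100] and clone to a contiguous layout before the 32 heads
// are merged into a single 3200-wide feature dimension.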
%int1_3935 = torch.constant.int 1
%int2_3936 = torch.constant.int 2
%3560 = torch.aten.transpose.int %3559, %int1_3935, %int2_3936 : !torch.vtensor<[1,32,?,100],f16>, !torch.int, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %3560, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int0_3937 = torch.constant.int 0
%3561 = torch.aten.clone %3560, %int0_3937 : !torch.vtensor<[1,?,32,100],f16>, !torch.int -> !torch.vtensor<[1,?,32,100],f16>
torch.bind_symbolic_shape %3561, [%238], affine_map<()[s0] -> (1, s0 * 16, 32, 100)> : !torch.vtensor<[1,?,32,100],f16>
%int1_3938 = torch.constant.int 1
%int3200_3939 = torch.constant.int 3200
%3562 = torch.prim.ListConstruct %int1_3938, %3453, %int3200_3939 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3563 = torch.aten._unsafe_view %3561, %3562 : !torch.vtensor<[1,?,32,100],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3563, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
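// Attention output projection: flatten to [s, 3200] and multiply by the transposed
// [3200, 3200] weight %149 (presumably this block's attn_output.weight).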
%int-2_3940 = torch.constant.int -2
%int-1_3941 = torch.constant.int -1
%3564 = torch.aten.transpose.int %149, %int-2_3940, %int-1_3941 : !torch.vtensor<[3200,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,3200],f16>
%int3200_3942 = torch.constant.int 3200
%3565 = torch.prim.ListConstruct %3453, %int3200_3942 : (!torch.int, !torch.int) -> !torch.list<int>
%3566 = torch.aten.view %3563, %3565 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3566, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%3567 = torch.aten.mm %3566, %3564 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,3200],f16> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3567, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%int1_3943 = torch.constant.int 1
%int3200_3944 = torch.constant.int 3200
%3568 = torch.prim.ListConstruct %int1_3943, %3453, %int3200_3944 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3569 = torch.aten.view %3567, %3568 : !torch.vtensor<[?,3200],f16>, !torch.list<int> -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3569, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
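// Residual connection: add the projected attention output onto the block input %3405.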
%int1_3945 = torch.constant.int 1
%3570 = torch.aten.add.Tensor %3405, %3569, %int1_3945 : !torch.vtensor<[1,?,3200],f16>, !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3570, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
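// RMSNorm in f32: y = x * rsqrt(mean(x^2, dim = -1) + eps) * weight, with
// eps ~= 1e-6 and the [3200] scale %150 (presumably this block's ffn_norm.weight).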
%int6_3946 = torch.constant.int 6
%3571 = torch.prims.convert_element_type %3570, %int6_3946 : !torch.vtensor<[1,?,3200],f16>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %3571, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int2_3947 = torch.constant.int 2
%3572 = torch.aten.pow.Tensor_Scalar %3571, %int2_3947 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %3572, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int-1_3948 = torch.constant.int -1
%3573 = torch.prim.ListConstruct %int-1_3948 : (!torch.int) -> !torch.list<int>
%true_3949 = torch.constant.bool true
%none_3950 = torch.constant.none
%3574 = torch.aten.mean.dim %3572, %3573, %true_3949, %none_3950 : !torch.vtensor<[1,?,3200],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %3574, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%float9.999990e-07_3951 = torch.constant.float 9.9999999747524271E-7
%int1_3952 = torch.constant.int 1
%3575 = torch.aten.add.Scalar %3574, %float9.999990e-07_3951, %int1_3952 : !torch.vtensor<[1,?,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %3575, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%3576 = torch.aten.rsqrt %3575 : !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,1],f32>
torch.bind_symbolic_shape %3576, [%238], affine_map<()[s0] -> (1, s0 * 16, 1)> : !torch.vtensor<[1,?,1],f32>
%3577 = torch.aten.mul.Tensor %3571, %3576 : !torch.vtensor<[1,?,3200],f32>, !torch.vtensor<[1,?,1],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %3577, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%3578 = torch.aten.mul.Tensor %150, %3577 : !torch.vtensor<[3200],f32>, !torch.vtensor<[1,?,3200],f32> -> !torch.vtensor<[1,?,3200],f32>
torch.bind_symbolic_shape %3578, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f32>
%int5_3953 = torch.constant.int 5
%3579 = torch.prims.convert_element_type %3578, %int5_3953 : !torch.vtensor<[1,?,3200],f32>, !torch.int -> !torch.vtensor<[1,?,3200],f16>
torch.bind_symbolic_shape %3579, [%238], affine_map<()[s0] -> (1, s0 * 16, 3200)> : !torch.vtensor<[1,?,3200],f16>
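// Feed-forward gate projection: [s, 3200] x [3200, 8640] -> [s, 8640] using the
// transposed weight %151 (presumably ffn_gate.weight), to be passed through SiLU below.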
%int-2_3954 = torch.constant.int -2
%int-1_3955 = torch.constant.int -1
%3580 = torch.aten.transpose.int %151, %int-2_3954, %int-1_3955 : !torch.vtensor<[8640,3200],f16>, !torch.int, !torch.int -> !torch.vtensor<[3200,8640],f16>
%int3200_3956 = torch.constant.int 3200
%3581 = torch.prim.ListConstruct %240, %int3200_3956 : (!torch.int, !torch.int) -> !torch.list<int>
%3582 = torch.aten.view %3579, %3581 : !torch.vtensor<[1,?,3200],f16>, !torch.list<int> -> !torch.vtensor<[?,3200],f16>
torch.bind_symbolic_shape %3582, [%238], affine_map<()[s0] -> (s0 * 16, 3200)> : !torch.vtensor<[?,3200],f16>
%3583 = torch.aten.mm %3582, %3580 : !torch.vtensor<[?,3200],f16>, !torch.vtensor<[3200,8640],f16> -> !torch.vtensor<[?,8640],f16>
torch.bind_symbolic_shape %3583, [%238], affine_map<()[s0] -> (s0 * 16, 8640)> : !torch.vtensor<[?,8640],f16>
%int1_3957 = torch.constant.int 1
%int8640_3958 = torch.constant.int 8640
%3584 = torch.prim.ListConstruct %int1_3957, %240, %int8640_3958 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%3585 = torch.aten.view %3583, %3584 : !torch.vtensor<[?,8640],f16>, !torch.list<int> -> !torch.vtensor<[1,?,8640],f16>
torch.bind_symbolic_shape %3585, [%238], affine_map<()[s0] -> (1, s0 * 16, 8640)> : !torch.vtensor<[1,?,8640],f16>
%3586 = torch.aten.silu // <- the gist is truncated at this point; the remainder of this op (and of the module) is not shown