Skip to content

Instantly share code, notes, and snippets.

% solution for https://puzzlehunt.club.cc.cmu.edu/puzzle/15011/
#script (python)
from clingo import Function
from collections import defaultdict
import operator
from colored import bg, attr
MAP = """
..B#.~....
% solution for https://puzzlehunt.club.cc.cmu.edu/puzzle/15026/
% define a 6x6 grid, and compass directions on this grid
% Grid coordinates: a 6x6 board of rows and columns.
row(1..6).
col(1..6).
% The four compass directions.
dir(n;e;s;w).
% delta(Dir, DRow, DCol): row/column offset for one step in Dir.
% Moving north decreases the row index by 1.
% NOTE(review): only the 'n' delta is visible here — the e/s/w deltas are
% presumably defined elsewhere in the program; confirm before relying on this.
delta(n, -1, 0).
from dataclasses import dataclass
from typing import Generator, TypeVar, Any, Callable, Optional, cast, NamedTuple, Dict, Type, Tuple, Generic
# Type variable for the final return type of an effectful computation.
R = TypeVar("R")
# An "effect" generator: per typing.Generator[YieldType, SendType, ReturnType],
# it yields operation tuples (Tuple[Any, ...]), may be sent arbitrary values
# back by whoever drives it, and ultimately returns a value of type R.
Eff = Generator[Tuple[Any, ...], Any, R]
def handle_op(
g: Eff[R],
op: Tuple[Any, ...],
# Translation of JIT type to a C++ argument type.
# TODO: remove use_c10_dispatcher_full kwarg from this function; type
# translation ideally doesn't depend on this
def argument_type(t: Type, *, mutable: bool, use_c10_dispatcher_full: bool) -> str:
# If it's a value type, do the value type translation
r = cpp_value_type(t)
if r is not None:
return r
if isinstance(t, BaseType):
diff --git a/build/aten/src/ATen_new/Functions.cpp b/build/aten/src/ATen/Functions.cpp
index 240605711c..2d3aacf897 100644
--- a/build/aten/src/ATen_new/Functions.cpp
+++ b/build/aten/src/ATen/Functions.cpp
@@ -188,7 +188,7 @@ Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_see
static auto op = c10::Dispatcher::singleton()
.findSchemaOrThrow("aten::_cudnn_init_dropout_state", "")
.typed<Tensor (double, bool, int64_t, c10::optional<ScalarType>, c10::optional<Layout>, c10::optional<Device>, c10::optional<bool>)>();
- return op.call(dropout, train, dropout_seed, dtype, layout, device, pin_memory);
+ return op.call(dropout, train, dropout_seed, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
// get NE, NW, SE, SW pixel values from (x, y)
// NW corner: floor of the fractional sample position (ix, iy).
// NOTE(review): assumes ix/iy are floating-point sample coordinates defined
// above this fragment — confirm against the enclosing function.
int ix_nw = static_cast<int>(::floor(ix));
int iy_nw = static_cast<int>(::floor(iy));
// NE: one column to the right of NW, same row.
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
// SW: same column as NW, one row below.
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
// SE: one column right and one row below NW.
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
Started by upstream project "pytorch-builds/py3.6-clang7-rocmdeb-ubuntu16.04-trigger" build number 12667
originally caused by:
Started by upstream project "pytorch-pull-request" build number 83727
originally caused by:
GitHub pull request #30821 of commit acba640a916884cb9cedafed807a5610ceec005e, no merge conflicts.
[EnvInject] - Loading node environment variables.
Building remotely on jenkins-worker-rocm-amd-52 (rocm docker) in workspace /var/lib/jenkins/workspace/pytorch-builds/py3.6-clang7-rocmdeb-ubuntu16.04-test2
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Done
[EnvInject] - Injecting environment variables from a build step.
// Naive matrix multiply: C[i][k] += A[i][j] * B[j][k] over flattened,
// row-major buffers. A is A_size[0] x A_size[1], B is A_size[1] x B_size[1],
// and C (C_size[0] x C_size[1]) is assumed zero-initialized by the caller.
for (int i = 0; i < A_size[0]; i++) {
  for (int j = 0; j < A_size[1]; j++) {
    for (int k = 0; k < B_size[1]; k++) {
      // BUG FIX: matrix multiplication accumulates the PRODUCT of the
      // A and B elements; the original code added them ('+') instead of
      // multiplying ('*'), computing sums of element pairs, not a matmul.
      C[i*C_size[1] + k] += A[i*A_size[1] + j] * B[j*B_size[1] + k];
    }
  }
}
[{"url":"https://streeteasy.com/building/sycamore-condominium/10d?featured=1","title":"250 E 30th Street #10D","price":"$3,500 FOR RENT","details":["583 ft²","$72 per ft²","2 rooms","1 bed","1 bath"],"neighborhood":"Kips Bay","vitals":[{"key":"AVAILABLE ON","value":"Available Now"},{"key":"DAYS ON MARKET","value":"16 Days"},{"key":"LAST PRICE CHANGE","value":"No Recorded Changes"}],"description":"Location Location Location! NO FEE …..NO FEE……LUXURY APARTMENT\nModern Appliances, High ceilings, Large windows, Hardwood Floors, Marble bath, Laundry on every floor, Fitness Center, Resident-Only Lounge with entertainment center and wet bar, 24 hour doorman\nThe beautiful tree-lined streets and the East River nearby provide peaceful views for morning runs or afternoon picnics. To the west, Third Avenue has wide-ranging dining choices on every block creating its own restaurant row!\nAt night, the neighborhood movie theater lights up with a selection of films for every taste and the seats in every theater recline.\nLo
at::Tensor AtenXlaType::_copy_from(const at::Tensor& self,
const at::Tensor& dst,
bool non_blocking) const {
// Do not mark the tensor creation as writeable to not discard the XLA tensor
// device context, but make a copy to avoid core data to be shared.
std::vector<at::Tensor> tensors = {self};
auto xla_tensors =
bridge::XlaCreateTensorList(tensors, /*writeable=*/nullptr);
// Hack in an overwrite of a const tensor.
at::Tensor t = CopyTensor(xla_tensors.front(), dst.scalar_type());