Yuxin Wu (ppwwyyxx): GitHub gists
# -*- coding: utf-8 -*-
import sys
import pprint
import copy
from collections import OrderedDict

class ComputeAPICall:
    def __init__(self, api, original):
        self.api = api
        self.original = original  # original logs
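
The gist preview stops here. A hypothetical usage sketch of the holder class above (the argument values are illustrative, not from the gist):

call = ComputeAPICall("Conv2D", ["raw log line 1", "raw log line 2"])
pprint.pprint(call.original)  # the log lines this API call was parsed from
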
#!/usr/bin/env python
from collections import namedtuple

from Xlib import X
import Xlib.display
from Xlib.ext.xtest import fake_input

class Screen(namedtuple('_Screen', ['x', 'y', 'w', 'h'])):
    @property
    def center(self):
        # Assumed completion: the gist preview is truncated at @property.
        return (self.x + self.w // 2, self.y + self.h // 2)
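
For context, the XTest extension imported above can synthesize pointer events. A hedged sketch, not part of the original gist, reusing the Xlib imports above:

# Move the pointer to (100, 200) and click the left mouse button.
display = Xlib.display.Display()
fake_input(display, X.MotionNotify, x=100, y=200)  # detail=0 means absolute move
fake_input(display, X.ButtonPress, 1)
fake_input(display, X.ButtonRelease, 1)
display.sync()
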
from detectron2.layers import ShapeSpec
from detectron2.modeling import FPN, GeneralizedRCNN, ResNet, StandardROIHeads
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone.fpn import LastLevelMaxPool
from detectron2.modeling.backbone.resnet import BasicStem, BottleneckBlock
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator.rpn import RPN, StandardRPNHead
from detectron2.modeling.roi_heads import FastRCNNOutputLayers, MaskRCNNConvUpsampleHead
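
These imports come from a gist that builds a Mask R-CNN model directly in code instead of through a config file. The gist body is truncated here; as a hedged sketch, the backbone alone can be assembled from these classes like so, with hyperparameters borrowed from detectron2's standard R50-FPN setup:

# Sketch (assumed, not the gist's own body): ResNet-50 + FPN backbone.
# make_default_stages builds the four BottleneckBlock stages internally.
resnet = ResNet(
    stem=BasicStem(in_channels=3, out_channels=64, norm="FrozenBN"),
    stages=ResNet.make_default_stages(depth=50, stride_in_1x1=True, norm="FrozenBN"),
    out_features=["res2", "res3", "res4", "res5"],
)
backbone = FPN(
    bottom_up=resnet,
    in_features=["res2", "res3", "res4", "res5"],
    out_channels=256,
    top_block=LastLevelMaxPool(),  # adds the extra "p6" level used by the RPN
)

The RPN and StandardROIHeads imported above consume the FPN's output feature maps ("p2" through "p6") and plug into GeneralizedRCNN in the same way.
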
# -*- coding: utf-8 -*-
import torch
from torch import nn
from fvcore.nn import FlopCountAnalysis, flop_count_table
from pypapi import events, papi_high as high

def main():
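    # The gist preview is truncated at `def main():`. A hedged, assumed
    # completion: compare fvcore's analytic flop count for a small linear
    # layer against a PAPI hardware counter of double-precision FP ops.
    model = nn.Linear(100, 10).double()
    inputs = torch.randn(1, 100, dtype=torch.float64)
    print(flop_count_table(FlopCountAnalysis(model, inputs)))
    high.start_counters([events.PAPI_DP_OPS])
    model(inputs)
    print("PAPI DP ops:", high.stop_counters()[0])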

ppwwyyxx / logging-fix.py
Last active February 11, 2022 06:50
Replaces logging.xxx calls with logger.xxx.

#!/usr/bin/env python3
import glob
import os
import sys
import logging
METHODS = ["debug", "info", "warning", "warn", "error", "exception", "critical"]
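
The preview stops at METHODS. A hedged sketch of what the rest of such a script could look like (not the gist's actual body): rewrite logging.<method>( calls to logger.<method>( in every Python file under the current directory.

for path in glob.glob("**/*.py", recursive=True):
    with open(path) as f:
        text = f.read()
    new_text = text
    for m in METHODS:
        new_text = new_text.replace(f"logging.{m}(", f"logger.{m}(")
    if new_text != text:  # only rewrite files that actually changed
        with open(path, "w") as f:
            f.write(new_text)
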
ppwwyyxx / fix-intel_wifi_aer-avell_g1513_fire_v3
Created March 26, 2022 23:08 — forked from flisboac/fix-intel_wifi_aer-avell_g1513_fire_v3
Temporary fix for AER's excessive `severity=Corrected` logging for Intel Wireless (Avell G1513 Fire V3) (Arch Linux)
from torch import nn
import torch

class ModuleWithLazySubmodules(nn.Module):
    def __init__(self, in_dim, middle_dim, submodules):
        super().__init__()
        self.first_layer = nn.Linear(in_dim, middle_dim)
        self.submodules = nn.Sequential(*submodules)
        # Dummy forward pass so lazy submodules (e.g. nn.LazyLinear) can
        # infer their input shapes and materialize their parameters now.
        self.forward(torch.rand(1, in_dim))

    def forward(self, x):
        # Assumed completion: the gist preview ends at the dummy forward call.
        return self.submodules(self.first_layer(x))
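
A hypothetical usage example (not from the gist): because __init__ runs a dummy forward pass, the lazy layers below already have concrete weights by the time construction returns.

m = ModuleWithLazySubmodules(4, 8, [nn.LazyLinear(6), nn.ReLU(), nn.LazyLinear(2)])
print(m)  # prints Linear layers with materialized in_features (8 and 6)
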