Each of these commands will run an ad hoc HTTP static server in your current (or specified) directory, available at http://localhost:8000. Use this power wisely.
$ python -m SimpleHTTPServer 8000
Each of these commands will run an ad hoc http static server in your current (or specified) directory, available at http://localhost:8000. Use this power wisely.
$ python -m SimpleHTTPServer 8000
# Inspiration: https://b.agilob.net/test-coverage-in-gitlab-ci-in-a-golang-project
# Navigate to: "Settings" -> "CI/CD" -> "Expand" next to "General pipeline settings"
# 1. Add ^coverage:\s(\d+(?:\.\d+)?%) as your regex in "Test coverage parsing"
# 2. Go to "Pipeline status" to get the badges code
# This file is a template, and might need editing before it works on your project.
image: golang:1.13.4-alpine
services:
- docker:stable-dind # only docker daemon, so we need to install docker cli and docker compose in separate script
Parameters from the request: page and limit.
Then:
offset = (page - 1) * limit
But you need to check: if page <= 0, then set page = 1;
if limit <= 0, then set it to the default minimum limit.
# WARNING: This docker-compose.yml is only for testing purposes.
# Parameters:
# - name: CONFLUENT_PLATFORM_VERSION
#   default: 3.0.0
#   reference: https://hub.docker.com/u/confluentinc/
# Ports:
# - description: Major ports are exposed to the host computer
# - zookeeper: 2181
#   kafka1: 9091
#   kafka2: 9092
#!/bin/bash | |
# based on https://lebkowski.name/docker-volumes/ | |
# remove exited containers: | |
docker ps --filter status=dead --filter status=exited -aq | xargs -r docker rm -v | |
docker volume ls -qf dangling=true | xargs -r docker volume rm | |
# remove unused images: | |
docker images --no-trunc | grep '<none>' | awk '{ print $3 }' | xargs -r docker rmi |
package main | |
import ( | |
"context" | |
"fmt" | |
"os" | |
"os/signal" | |
"strings" | |
"syscall" | |
"time" |
package kafka | |
import ( | |
"time" | |
"github.com/segmentio/kafka-go" | |
"github.com/segmentio/kafka-go/snappy" | |
) | |
var writer *kafka.Writer |
# create topic: /opt/kafka/bin/kafka-topics.sh --create --zookeeper zoo1:2181,zoo2:2181 --topic qmessage --partitions 4 --replication-factor 2
# use the official zookeeper image, since the wurstmeister/zookeeper image does not currently support zookeeper in replicated mode
# (https://github.com/wurstmeister/kafka-docker/issues/372#issuecomment-409166940)
# for the first run, set KAFKA_CREATE_TOPICS: 'qmessage:4:2' to create the new topic, or use the command above.
# For every restart, don't set it, since it will recreate the topic; hence your old data will be lost and your consumer will miss some data.
version: '2'
services:
  zoo1:
    image: zookeeper:3.4.13
    container_name: zoo1
package test.tokenizer.tokenizerId; | |
import java.util.ArrayList; | |
import yusufs.nlp.tokenizerid.Tokenizer;; | |
/** | |
* Hello world! | |
* | |
*/ |
String s = "hksaksaggah";//sample string | |
String temp2="";//string with no duplicates | |
HashMap<Integer, Character> tc = new HashMap<>();//create a hashmap to store the char's | |
char [] charArray = s.toCharArray(); | |
for (Character c : charArray)//for each char | |
{ | |
if (!tc.containsValue(c))//if the char is not already in the hashmap | |
{ | |
temp2=temp2+c.toString();//add the char to the output string | |
tc.put(c.hashCode(),c);//and add the char to the hashmap |