This configuration, placed in Zed's tasks.json, allows Zed to run tasks with a specific working directory per project:
[
  // Placeholder commands; "cwd" gives each task its own working directory.
  { "label": "backend", "command": "cargo run", "cwd": "$ZED_WORKTREE_ROOT/backend" },
  { "label": "frontend", "command": "npm run dev", "cwd": "$ZED_WORKTREE_ROOT/frontend" }
]
version: '3.8'
networks:
  my_network:
    driver: bridge
volumes:
  kafka-data:
  kafka-ui-data:
services:
  # Assumed minimal service matching the kafka-data volume; the image
  # and its data path depend on your Kafka distribution.
  kafka:
    image: confluentinc/cp-kafka:7.6.0
    networks:
      - my_network
    volumes:
      - kafka-data:/var/lib/kafka/data
use ropey::{Rope, RopeSlice};
use serde::{Deserialize, Serialize};
use std::fmt;
use thiserror::Error;
use tower_lsp::lsp_types::{Position, TextDocumentContentChangeEvent};
use tree_sitter::{InputEdit, Parser, Point, Tree};

/// A text document stored as a rope, with an optional tree-sitter parse
/// tree kept in sync with edits for incremental reparsing.
pub struct TextDocument {
    pub rope: Rope,
    pub tree: Option<Tree>,
}
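
The struct pairs a rope (cheap edits with byte/char/line indexing) with an optional tree-sitter tree, which is the usual shape for incremental reparsing. Below is a minimal sketch of how the two are typically kept in sync; the apply_change name is hypothetical, the parser is assumed to already have a language set, and columns are treated as byte counts for brevity (real LSP positions default to UTF-16 code units).

// Sketch, not from the source: mirror one LSP change into the rope and
// into a tree-sitter InputEdit, then reparse reusing the old tree.
impl TextDocument {
    pub fn apply_change(&mut self, parser: &mut Parser, change: &TextDocumentContentChangeEvent) {
        // Full-document sync (no range) is elided for brevity.
        let Some(range) = &change.range else { return };

        // LSP line/character positions -> byte offsets, via the rope.
        let start_byte = self.rope.line_to_byte(range.start.line as usize)
            + range.start.character as usize;
        let old_end_byte = self.rope.line_to_byte(range.end.line as usize)
            + range.end.character as usize;

        // Splice the new text into the rope (ropey edits use char indices).
        let start_char = self.rope.byte_to_char(start_byte);
        let old_end_char = self.rope.byte_to_char(old_end_byte);
        self.rope.remove(start_char..old_end_char);
        self.rope.insert(start_char, &change.text);

        // Describe the same edit to the old tree, then reparse; tree-sitter
        // reuses every subtree the edit did not touch.
        let new_end_byte = start_byte + change.text.len();
        let new_end_line = self.rope.byte_to_line(new_end_byte);
        let new_end_col = new_end_byte - self.rope.line_to_byte(new_end_line);
        if let Some(tree) = self.tree.as_mut() {
            tree.edit(&InputEdit {
                start_byte,
                old_end_byte,
                new_end_byte,
                start_position: Point::new(range.start.line as usize, range.start.character as usize),
                old_end_position: Point::new(range.end.line as usize, range.end.character as usize),
                new_end_position: Point::new(new_end_line, new_end_col),
            });
        }
        self.tree = parser.parse(self.rope.to_string(), self.tree.as_ref());
    }
}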
use std::path::Path;
use std::process::Stdio;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Command;
use tokio::sync::mpsc;

/// Execute a process, gathering its mixed stdout/stderr output into a
/// single stream of lines.
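
The body this doc comment describes is missing here; the following is a sketch under the obvious reading, with assumed names (run_merged, a line-oriented mpsc channel whose receiver the caller drains): capture both pipes and forward every line of stdout and stderr into one channel.

// Assumed completion of the missing body: pipe both outputs, read them
// concurrently, and interleave every line into a single channel.
pub async fn run_merged(
    program: &Path,
    args: &[&str],
    tx: mpsc::Sender<String>,
) -> std::io::Result<()> {
    let mut child = Command::new(program)
        .args(args)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()?;

    let mut out_lines = BufReader::new(child.stdout.take().expect("stdout piped")).lines();
    let mut err_lines = BufReader::new(child.stderr.take().expect("stderr piped")).lines();

    // One sender per stream; the receiver sees a single merged output,
    // the "mixed outputs into stdout" behaviour the doc comment names.
    let err_tx = tx.clone();
    let out_task = tokio::spawn(async move {
        while let Ok(Some(line)) = out_lines.next_line().await {
            let _ = tx.send(line).await;
        }
    });
    let err_task = tokio::spawn(async move {
        while let Ok(Some(line)) = err_lines.next_line().await {
            let _ = err_tx.send(line).await;
        }
    });

    let _ = tokio::join!(out_task, err_task);
    child.wait().await?;
    Ok(())
}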
import java.util.Properties
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer}

// One Properties object carrying both producer (serializer) and
// consumer (deserializer, group.id, poll) settings.
val kafkaProps = new Properties()
kafkaProps.put("bootstrap.servers", endpoint) // endpoint supplied by the caller
kafkaProps.put("key.serializer", classOf[ByteArraySerializer])
kafkaProps.put("key.deserializer", classOf[ByteArrayDeserializer])
kafkaProps.put("value.serializer", classOf[ByteArraySerializer])
kafkaProps.put("value.deserializer", classOf[ByteArrayDeserializer])
kafkaProps.put("group.id", "CrawlerTasksStorage")
kafkaProps.put("max.poll.records", "1000")
kafkaProps.put("enable.auto.commit", "false")
# Python http.server that sets the Access-Control-Allow-Origin header.
# https://gist.github.com/razor-x/9542707
import http.server
import socketserver

PORT = 8000
# Handler completion (cut off above): add the CORS header to every response.
class CORSRequestHandler(http.server.SimpleHTTPRequestHandler):
    def end_headers(self):
        self.send_header("Access-Control-Allow-Origin", "*")
        super().end_headers()

with socketserver.TCPServer(("", PORT), CORSRequestHandler) as httpd:
    httpd.serve_forever()
val a = Stream(1)
//a: scala.collection.immutable.Stream[Int] = Stream(1, ?)

// b is self-referential: forcing b.head would recurse forever, so any
// operation combining it with a must be lazy in its Stream argument.
def b: Stream[Int] = Stream(b.head)
//b: Stream[Int]

a #::: b
//res0: scala.collection.immutable.Stream[Int] = Stream(1, ?)

// append also takes its argument by name, so b is never forced here.
a append b