Set the values you'd like to cache as the content of a FlowFile, then pass it to PutDistributedCache.
You can then enrich incoming FlowFiles by adding an attribute whose value is fetched from the DistributedCache.
using Dapper;
using Oracle.ManagedDataAccess.Client;
using System;
using System.Collections.Generic;
using System.Data;

// Dapper IDynamicParameters implementation for the Oracle managed driver.
public class OracleDynamicParameters : SqlMapper.IDynamicParameters
{
    // Cache of compiled binders for templated parameter objects.
    private static readonly Dictionary<SqlMapper.Identity, Action<IDbCommand, object>> paramReaderCache =
        new Dictionary<SqlMapper.Identity, Action<IDbCommand, object>>();

    private readonly List<OracleParameter> parameters = new List<OracleParameter>();

    public void Add(string name, OracleDbType type, ParameterDirection direction, object value = null) =>
        parameters.Add(new OracleParameter(name, type, value, direction));

    // Dapper calls this to bind the collected parameters onto the command.
    public void AddParameters(IDbCommand command, SqlMapper.Identity identity) =>
        ((OracleCommand)command).Parameters.AddRange(parameters.ToArray());
}
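// Usage sketch (names are illustrative; assumes an open OracleConnection "conn"
// and Dapper's Query extensions):
//   var p = new OracleDynamicParameters();
//   p.Add("id", OracleDbType.Int32, ParameterDirection.Input, 42);
//   var row = conn.QueryFirst<MyRow>("SELECT * FROM t WHERE id = :id", p);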
// This is an example of using elastic's BulkProcessor with Elasticsearch:
// a simple process that performs bulk indexing with the BulkProcessor.
//
// See https://github.com/olivere/elastic and
// https://github.com/olivere/elastic/wiki/BulkProcessor for more details.
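A minimal sketch of the pattern, assuming Elasticsearch on localhost:9200 and the v5/v6 API of github.com/olivere/elastic (the worker name, index, and document below are illustrative):

package main

import (
	"context"
	"log"
	"time"

	"github.com/olivere/elastic"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Start a BulkProcessor that commits every 1000 actions or every 5 seconds,
	// whichever comes first, using 2 background workers.
	p, err := client.BulkProcessor().
		Name("example-worker").
		Workers(2).
		BulkActions(1000).
		FlushInterval(5 * time.Second).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close() // flushes queued requests and stops the workers

	// Queue an index request; the processor batches and sends it asynchronously.
	doc := map[string]interface{}{"user": "olivere", "retweets": 0}
	p.Add(elastic.NewBulkIndexRequest().Index("tweets").Type("doc").Id("1").Doc(doc))
}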
using System.Collections.Generic;
using System.Security.Claims;
using System.Threading.Tasks;
using Nancy;

public class AuthModule : NancyModule
{
    public AuthModule()
    {
        Post("/login", async _ =>
        {
            var myclaims = new List<Claim>(new Claim[] { new Claim("Id", "SOME USER ID FROM SOMEWHERE!!") });
            var claimsPrincipal = new ClaimsPrincipal(new ClaimsIdentity(myclaims, "MyCookieMW"));
            // Sign the principal in with the "MyCookieMW" cookie middleware here (omitted in this snippet).
            await Task.CompletedTask;
            return HttpStatusCode.OK;
        });
    }
}
#!/bin/bash
# Installs the Wavefront Proxy and configures the standard Telegraf plugin.
# ####
function logo() {
cat << "EOT"
__ __ _____ __
/ \ / \_____ ___ __ _____/ ____\______ ____ _____/ |_
\ \/\/ /\__ \\ \/ // __ \ __\\_ __ \/ _ \ / \ __\
 \ / / __ \\ /\ ___/| | | | \( <_> ) | \
EOT
}
This gist is based on the information available at golang/dep, just slightly terser and annotated with a few notes and links, primarily for my own benefit. It's public in case this information is helpful to anyone else as well.
I initially advocated Glide for my team and then, more recently, vndr. I've also taken the approach of exerting direct control over what goes into vendor/ in my Dockerfiles, and of working from isolated per-project GOPATH environments to ensure that dependencies are explicitly resolved from vendor/.
At the end of the day, vendoring (and committing vendor/) is about being in control of your dependencies and being able to achieve reproducible builds. While you can achieve this manually, things that are nice to have in a vendoring tool include:
BIT.LY link to this page:
https://bit.ly/weather-api

API key:
bd5e378503939ddaee76f12ad7a97608

API docs:
http://openweathermap.org/current

How to use an API key:
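A minimal sketch in Go, assuming the standard query pattern from the documentation above (city via the q parameter, the key via appid; London is just an example city):

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Current weather for one city, authenticated with the API key above.
	url := "http://api.openweathermap.org/data/2.5/weather?q=London&appid=bd5e378503939ddaee76f12ad7a97608"
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body)) // raw JSON describing the current weather
}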
#!/bin/bash
# create nifi user and group
sudo useradd -m nifi --shell /bin/bash
# create installation directory
sudo mkdir /opt/nifi
# download the tarball
sudo curl -o /opt/nifi/nifi-1.6.0-bin.tar.gz http://apache.melbourneitmirror.net/nifi/1.6.0/nifi-1.6.0-bin.tar.gz
# unpack it and hand the tree to the nifi user
sudo tar -xzf /opt/nifi/nifi-1.6.0-bin.tar.gz -C /opt/nifi
sudo chown -R nifi:nifi /opt/nifi
CREATE SEQUENCE public.global_id_seq;
ALTER SEQUENCE public.global_id_seq OWNER TO postgres;

CREATE OR REPLACE FUNCTION public.id_generator()
RETURNS bigint
LANGUAGE plpgsql
AS $BODY$
DECLARE
    our_epoch bigint := 1314220021721;
    seq_id bigint;
    now_millis bigint;
    shard_id int := 1; -- assumption: single shard; vary per database/schema
    result bigint;
BEGIN
    SELECT nextval('public.global_id_seq') % 1024 INTO seq_id;
    SELECT FLOOR(EXTRACT(EPOCH FROM clock_timestamp()) * 1000) INTO now_millis;
    -- 41 bits of milliseconds since the custom epoch, 13 bits of shard id, 10 bits of sequence
    result := (now_millis - our_epoch) << 23;
    result := result | (shard_id << 10);
    result := result | (seq_id);
    RETURN result;
END;
$BODY$;
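As a usage sketch, here is how a Go program might pull ids from that function; the connection string is illustrative and assumes the lib/pq driver:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // Postgres driver
)

func main() {
	db, err := sql.Open("postgres", "postgres://postgres@localhost/mydb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var id int64
	// Each call returns a 64-bit, roughly time-ordered id.
	if err := db.QueryRow("SELECT public.id_generator()").Scan(&id); err != nil {
		log.Fatal(err)
	}
	fmt.Println("generated id:", id)
}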
type Set struct {
	list map[int]struct{} // empty structs occupy 0 memory
}

// NewSet returns an initialized Set (added so the snippet runs standalone).
func NewSet() *Set { return &Set{list: make(map[int]struct{})} }

func (s *Set) Has(v int) bool {
	_, ok := s.list[v]
	return ok
}

// Add inserts v into the set.
func (s *Set) Add(v int) { s.list[v] = struct{}{} }
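A quick usage sketch, assuming the type above lives in the same package (NewSet and Add are the completions added above):

package main

import "fmt"

func main() {
	s := NewSet()
	s.Add(7)
	fmt.Println(s.Has(7), s.Has(3)) // true false
}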