package main

import (
	"bytes"
	"log"
	"os"
	"os/exec"
	"syscall"

	"github.com/cloudfoundry/gosigar/psnotify"
)
:: Restart Windows Explorer: force-kill the running explorer.exe, then relaunch it.
taskkill /f /im explorer.exe
explorer.exe
javascript: (function() {
	/*
	Adds download links to gifs on Giphy for easier mass download.
	Loads jQuery first if it is not already loaded.
	*/
	if ('undefined' == typeof jQuery) {
		var script = document.createElement('script');
		script.src = 'https://code.jquery.com/jquery-3.1.1.min.js';
		script.onload = getServers; // run the main routine once jQuery is available
		document.body.appendChild(script);
So, from what I can tell from the docs [https://docs.spring.io/spring-batch/trunk/reference/html/readersAndWriters.html#database]:

Cursor-based item readers use a single DB connection and, by default, load the entire ResultSet into application memory. The application then iterates over the ResultSet with a cursor (so not a DB cursor), mapping and writing out one row at a time so that previously read rows can be garbage collected. You can set maxRows to limit how many rows are held in the ResultSet at one time; when the ResultSet needs more rows, it fetches them over the same connection (how many depends on the value of fetchSize). This continues until all rows from the query have been loaded into the ResultSet and read.

Page-based item readers make multiple queries, each returning a different "page" of the results (the size is configurable with the setPageSize method).

I assume cursor-based item readers probably use more memory (unless configured appropriately) but are faster, whereas the page-based readers would typically consume less memory at the cost of issuing more queries.
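To make the difference concrete, here's a rough sketch of both reader styles using the Spring Batch 4 builders. The person table, the Person row type, and the reader names are made up for illustration; they're not from the docs:

import java.util.Map;

import javax.sql.DataSource;

import org.springframework.batch.item.database.JdbcCursorItemReader;
import org.springframework.batch.item.database.JdbcPagingItemReader;
import org.springframework.batch.item.database.Order;
import org.springframework.batch.item.database.builder.JdbcCursorItemReaderBuilder;
import org.springframework.batch.item.database.builder.JdbcPagingItemReaderBuilder;
import org.springframework.jdbc.core.BeanPropertyRowMapper;

public class ReaderSketches {

    // Hypothetical row type, just for illustration.
    public static class Person {
        private long id;
        private String name;
        public void setId(long id) { this.id = id; }
        public void setName(String name) { this.name = name; }
    }

    // Cursor style: one query over one connection; fetchSize hints how many
    // rows the driver pulls into the ResultSet at a time.
    JdbcCursorItemReader<Person> cursorReader(DataSource dataSource) {
        return new JdbcCursorItemReaderBuilder<Person>()
                .name("personCursorReader")
                .dataSource(dataSource)
                .sql("SELECT id, name FROM person ORDER BY id")
                .fetchSize(1000)
                .rowMapper(new BeanPropertyRowMapper<>(Person.class))
                .build();
    }

    // Paging style: a separate query per page of pageSize rows.
    JdbcPagingItemReader<Person> pagingReader(DataSource dataSource) {
        return new JdbcPagingItemReaderBuilder<Person>()
                .name("personPagingReader")
                .dataSource(dataSource)
                .selectClause("SELECT id, name")
                .fromClause("FROM person")
                .sortKeys(Map.of("id", Order.ASCENDING))
                .pageSize(1000)
                .rowMapper(new BeanPropertyRowMapper<>(Person.class))
                .build();
    }
}

So with the cursor reader you tune fetchSize/maxRows, and with the paging reader you tune pageSize.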
import lombok.extern.slf4j.Slf4j;
import lombok.val;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.web.filter.OncePerRequestFilter;
import org.springframework.web.util.ContentCachingRequestWrapper;
import org.springframework.web.util.ContentCachingResponseWrapper;

import javax.servlet.FilterChain;
import javax.servlet.ServletException;
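These imports open a request/response logging filter whose class body is cut off in this excerpt. Here is a minimal sketch of how the two caching wrappers are typically wired together in a OncePerRequestFilter; the class name and log statements are placeholders of mine, not the original's (which presumably uses the HttpStatus and MediaType imports in the elided body):

// Sketch only; needs these in addition to the imports above:
import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

@Slf4j
public class RequestLoggingFilter extends OncePerRequestFilter {

    @Override
    protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response,
            FilterChain filterChain) throws ServletException, IOException {
        // Wrap both streams so their bodies can still be read after the chain has run.
        val wrappedRequest = new ContentCachingRequestWrapper(request);
        val wrappedResponse = new ContentCachingResponseWrapper(response);
        try {
            filterChain.doFilter(wrappedRequest, wrappedResponse);
        } finally {
            log.info("{} {} -> {}", wrappedRequest.getMethod(),
                    wrappedRequest.getRequestURI(), wrappedResponse.getStatus());
            log.debug("request body: {}", new String(wrappedRequest.getContentAsByteArray()));
            // The cached response body must be copied back, or the client receives an empty body.
            wrappedResponse.copyBodyToResponse();
        }
    }
}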
Look at LSB init scripts for more information.
Copy to /etc/init.d:
# Replace "$YOUR_SERVICE_NAME" with your service's name (wherever it is not obvious enough).
# The following will split a CSV (file.csv) into multiple parts of 1 million lines each,
# with each part having its own header.
#
# PREFIX denotes the filename to use for the parts; split's -d flag appends a numeric
# suffix to it (PREFIX00, PREFIX01, ...). The --filter command runs once per part with
# $FILE set to the part's name, prepending the header line from file.csv before the rows.
tail -n +2 file.csv |
split -d -l 1000000 - --filter='sh -c "{ head -n1 file.csv; cat; } > $FILE"' PREFIX
from lxml import html
import requests
import unicodecsv as csv
import argparse


def parse(zipcode, page=0, filter=None):
    if filter == "newest":
        url = "https://www.zillow.com/homes/for_sale/{0}/{1}_p/0_singlestory/days_sort".format(zipcode, page)
    elif filter == "cheapest":
tell application "Microsoft Outlook"
	activate
	set selectedMessages to selected objects
	if selectedMessages is {} then
		display notification "Please select a message in Outlook before running the script!"
	else
		set folderChoices to {"Inbox", "2 - Action", "3 - Waiting For", "4 - Reference", "5 - Archive"}
		set selectedFolders to choose from list folderChoices with prompt "Select folder to move to:" default items {"5 - Archive"}
		-- choose from list returns false if the user cancels
		if selectedFolders is not false then
			set selectedFolder to item 1 of selectedFolders
			set destFolder to mail folder selectedFolder
			-- move every selected message into the chosen folder
			repeat with theMessage in selectedMessages
				move theMessage to destFolder
			end repeat
		end if
	end if
end tell