@rjpower
Created September 12, 2016 20:26
{
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import elasticsearch\n",
"import json\n",
"ES_URL = 'es.development.s2.dev.ai2:9200'\n",
"ES = elasticsearch.Elasticsearch(ES_URL)\n",
"\n",
"body = {\n",
" \"timeout\": \"5000ms\",\n",
" \"aggs\": {\n",
" \"years\": {\n",
" \"terms\" : { \n",
" \"field\" : \"year\",\n",
" \"size\": 100,\n",
" }\n",
" }\n",
" }\n",
"}"
]
},
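{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch (not in the original notebook) of how the aggregation `body` above could be executed; it assumes the same `paper` index used in the search cell below, and reads the per-year counts from `aggregations.years.buckets`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Hypothetical usage of the aggregation request defined above (assumes the 'paper' index).\n",
"# size=0 skips returning individual hits; only the aggregation buckets are needed.\n",
"agg_resp = ES.search(index='paper', doc_type='paper', body=body, size=0)\n",
"\n",
"# Each terms bucket maps a year ('key') to the number of matching papers ('doc_count').\n",
"for bucket in agg_resp['aggregations']['years']['buckets']:\n",
"    print(bucket['key'], bucket['doc_count'])"
]
},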
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"[' facilitates the flexible inference while preserving the visual characteristics of each image. 3) In each Graph LSTM unit, different forget gates for the neighboring nodes are learned to dynamically incorporate the local contextual interactions in accordance with their semantic relations. 4) We apply the proposed Graph LSTM in semantic object parsing, and demonstrate its superiority through comprehensive comparisons on four challenging semantic object parsing datasets (i.e., PASCAL-Person-Part',\n",
" ' incorporates pairwise or higher order factors. Instead of learning features only from local convolutional kernels as in Deep convnet Graph LSTM + Graph LSTM + Convolution Adaptive node updating sequence Confidence map Convolution Confidence map Residual connection Residual connection Input Parsing Result Superpixel map 1 1 1 1 Fig. 3 . Illustration of the proposed network architecture for semantic object parsing. The Graph LSTM layers built on a superpixel map are appended on the convolutional',\n",
" ' LSTM layers to improve the network training with many layers. these previous methods, we incorporate the global context by the novel Graph LSTM structure to capture long-distance dependencies on the superpixels. The dependency field of Graph LSTM can effectively cover the entire image context.\\n\\nThe Proposed Graph LSTM\\n\\nIn introducing Graph LSTM, we take semantic object parsing as its application scenario , which aims to generate pixel-wise semantic part segmentation for each image. Fig. 3']"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"query_string = \"lstm semantic parsing\"\n",
"\n",
"query = {\n",
" 'bool': {\n",
" 'should': [\n",
" {\n",
" 'query_string': {\n",
" 'query': query_string,\n",
" 'default_field': 'paperAbstract',\n",
" }\n",
" }\n",
" ],\n",
"# 'filter': [\n",
"# {'ids': {'values': doc_ids}}\n",
"# ]\n",
" }\n",
"}\n",
"\n",
"highlight_query = {\n",
" 'bool': {\n",
" 'should': [\n",
" {\n",
" 'query_string': {\n",
" 'query': query_string,\n",
" 'default_field': 'bodyText',\n",
" }\n",
" }\n",
" ],\n",
"# 'filter': [\n",
"# {'ids': {'values': doc_ids}}\n",
"# ]\n",
" }\n",
"}\n",
"\n",
"highlight_options = {\n",
" \"force_source\": True,\n",
" \"highlight_query\": highlight_query,\n",
" \"pre_tags\": [\"\"],\n",
" \"post_tags\": [\"\"],\n",
" \"fragment_size\": 500,\n",
" \"number_of_fragments\": 3,\n",
"}\n",
"\n",
"request = {\n",
" 'query': query,\n",
" 'highlight': {\n",
" 'fields': {\n",
" 'paperAbstract': highlight_options,\n",
" 'bodyText': highlight_options,\n",
" }\n",
" }\n",
"\n",
"}\n",
"\n",
"resp = ES.search(\n",
" index='paper', doc_type='paper',\n",
" fields=['title', 'paperAbstract', 'authors.name', 'year', 'numCitedBy', 'sourceInfo.pdfProcessed'],\n",
" body=request,\n",
" size=20,\n",
")\n",
"\n",
"data = []\n",
"for r in resp['hits']['hits']:\n",
" if not 'fields' in r:\n",
" logging.warning('Missing doc: %s', id)\n",
" continue\n",
"\n",
" fields = r['fields']\n",
" if 'highlight' in r:\n",
" snippets = list(r['highlight'].values())[0]\n",
" else:\n",
" snippets = []\n",
"\n",
" data.append({\n",
" 'title': fields['title'][0],\n",
" 'abstract': fields['paperAbstract'][0],\n",
" 'authors': fields['authors.name'][0],\n",
" 'citations': fields['numCitedBy'][0],\n",
" 'year': fields.get('year', [0])[0],\n",
" 'id': r['_id'],\n",
" 'snippets': snippets,\n",
" 'has_pdf': fields['sourceInfo.pdfProcessed'][0]\n",
" })\n",
" \n",
"import pandas as pd\n",
"df = pd.DataFrame.from_records(data)\n",
"df.snippets.iloc[0]"
]
},
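{
"cell_type": "markdown",
"metadata": {},
"source": [
"An optional follow-up sketch (not in the original notebook): rank the retrieved papers by citation count using the `df` built above, keeping a few columns for a quick scan of the results."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sort the retrieved papers so the most-cited ones appear first.\n",
"df[['title', 'year', 'citations', 'has_pdf']].sort_values('citations', ascending=False).head(10)"
]
},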
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python [Root]",
"language": "python",
"name": "Python [Root]"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 0
}