@PhilipMay
Created June 24, 2020 20:16
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "early-stop.ipynb",
"provenance": [],
"collapsed_sections": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU",
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"b4cbe7ea13b34187bb6110df44adb7ba": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_3597af482f8a43bcb9d30e0355653f08",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_5f825f1a7b064029b762f08dd59e8fff",
"IPY_MODEL_cc88f701b9614221ac4a5c1fde7fe3d9"
]
}
},
"3597af482f8a43bcb9d30e0355653f08": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"5f825f1a7b064029b762f08dd59e8fff": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_26e154c5290a44afa4ffb5b0befd6350",
"_dom_classes": [],
"description": "Downloading: 100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 254728,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 254728,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_8c1883f21e604ffdaa8d7c946f8ddbbc"
}
},
"cc88f701b9614221ac4a5c1fde7fe3d9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_d07be38ceb4745bca42aa8acb079f570",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 255k/255k [00:01<00:00, 210kB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_2575193d16cd4505ad513ccf2c075e36"
}
},
"26e154c5290a44afa4ffb5b0befd6350": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"8c1883f21e604ffdaa8d7c946f8ddbbc": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"d07be38ceb4745bca42aa8acb079f570": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"2575193d16cd4505ad513ccf2c075e36": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"ec3cfe686ab64c71b784a09e9509dc72": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_ec04ca09d94d4e5aa694f1381df2b64d",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_fef7a2f25551440c8ff3d82b859c88d9",
"IPY_MODEL_f72ee1fde53f45b09ca9f03d1c69edbf"
]
}
},
"ec04ca09d94d4e5aa694f1381df2b64d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"fef7a2f25551440c8ff3d82b859c88d9": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_72edf716621a418081cc3248bf438f95",
"_dom_classes": [],
"description": "Downloading: 100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 433,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 433,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_c1958e64aaab4128a30500de43765760"
}
},
"f72ee1fde53f45b09ca9f03d1c69edbf": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_2d8ec0ab88214fc39ab2c84e4a3da946",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 433/433 [00:41<00:00, 10.3B/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_a85c936714f3407b9f73df6b4a31df68"
}
},
"72edf716621a418081cc3248bf438f95": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"c1958e64aaab4128a30500de43765760": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"2d8ec0ab88214fc39ab2c84e4a3da946": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"a85c936714f3407b9f73df6b4a31df68": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"bc31dc04151c43fbb1c1206628143337": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
"_view_name": "HBoxView",
"_dom_classes": [],
"_model_name": "HBoxModel",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
"layout": "IPY_MODEL_1ae2bdfd14104caeb3abd6734f99982f",
"_model_module": "@jupyter-widgets/controls",
"children": [
"IPY_MODEL_c16536c584b941beb2895878b7bc5cdb",
"IPY_MODEL_6a5c1ce5004043d79630a7d5d27972e7"
]
}
},
"1ae2bdfd14104caeb3abd6734f99982f": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"c16536c584b941beb2895878b7bc5cdb": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
"style": "IPY_MODEL_ae69dd9cf04440399791ae9b6991dd65",
"_dom_classes": [],
"description": "Downloading: 100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
"max": 438869143,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": 438869143,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_68e81308eea148558e9474705fd53608"
}
},
"6a5c1ce5004043d79630a7d5d27972e7": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
"style": "IPY_MODEL_b8a7cf667e734c88ad121d2f5eae263a",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "​",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"value": " 439M/439M [00:40<00:00, 10.8MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
"layout": "IPY_MODEL_ab7ee10d36b24e3785af73bf53da24e6"
}
},
"ae69dd9cf04440399791ae9b6991dd65": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "ProgressStyleModel",
"description_width": "initial",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"bar_color": null,
"_model_module": "@jupyter-widgets/controls"
}
},
"68e81308eea148558e9474705fd53608": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
},
"b8a7cf667e734c88ad121d2f5eae263a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
"_view_name": "StyleView",
"_model_name": "DescriptionStyleModel",
"description_width": "",
"_view_module": "@jupyter-widgets/base",
"_model_module_version": "1.5.0",
"_view_count": null,
"_view_module_version": "1.2.0",
"_model_module": "@jupyter-widgets/controls"
}
},
"ab7ee10d36b24e3785af73bf53da24e6": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
"_view_name": "LayoutView",
"grid_template_rows": null,
"right": null,
"justify_content": null,
"_view_module": "@jupyter-widgets/base",
"overflow": null,
"_model_module_version": "1.2.0",
"_view_count": null,
"flex_flow": null,
"width": null,
"min_width": null,
"border": null,
"align_items": null,
"bottom": null,
"_model_module": "@jupyter-widgets/base",
"top": null,
"grid_column": null,
"overflow_y": null,
"overflow_x": null,
"grid_auto_flow": null,
"grid_area": null,
"grid_template_columns": null,
"flex": null,
"_model_name": "LayoutModel",
"justify_items": null,
"grid_row": null,
"max_height": null,
"align_content": null,
"visibility": null,
"align_self": null,
"height": null,
"min_height": null,
"padding": null,
"grid_auto_rows": null,
"grid_gap": null,
"max_width": null,
"order": null,
"_view_module_version": "1.2.0",
"grid_template_areas": null,
"object_position": null,
"object_fit": null,
"grid_auto_columns": null,
"margin": null,
"display": null,
"left": null
}
}
}
}
},
"cells": [
{
"cell_type": "code",
"metadata": {
"id": "eg5041pZWFfM",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 398
},
"outputId": "92c275c7-bc9b-4dbb-8f5b-2334dde5c21e"
},
"source": [
"!nvidia-smi"
],
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"text": [
"Wed Jun 24 19:56:44 2020 \n",
"+-----------------------------------------------------------------------------+\n",
"| NVIDIA-SMI 450.36.06 Driver Version: 418.67 CUDA Version: 10.1 |\n",
"|-------------------------------+----------------------+----------------------+\n",
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
"| | | MIG M. |\n",
"|===============================+======================+======================|\n",
"| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n",
"| N/A 65C P8 11W / 70W | 0MiB / 15079MiB | 0% Default |\n",
"| | | ERR! |\n",
"+-------------------------------+----------------------+----------------------+\n",
" \n",
"+-----------------------------------------------------------------------------+\n",
"| Processes: |\n",
"| GPU GI CI PID Type Process name GPU Memory |\n",
"| ID ID Usage |\n",
"|=============================================================================|\n",
"| No running processes found |\n",
"+-----------------------------------------------------------------------------+\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "3-6kGWOGWWLA",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
},
"outputId": "3df6cf52-5356-409a-b8f9-7c30415444aa"
},
"source": [
"!git clone https://github.com/deepset-ai/FARM.git\n",
"%cd FARM\n",
"!pip install -r requirements.txt\n",
"!pip install --editable ."
],
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"text": [
"Cloning into 'FARM'...\n",
"remote: Enumerating objects: 234, done.\u001b[K\n",
"remote: Counting objects: 100% (234/234), done.\u001b[K\n",
"remote: Compressing objects: 100% (150/150), done.\u001b[K\n",
"remote: Total 5461 (delta 162), reused 125 (delta 84), pack-reused 5227\u001b[K\n",
"Receiving objects: 100% (5461/5461), 65.48 MiB | 10.69 MiB/s, done.\n",
"Resolving deltas: 100% (4030/4030), done.\n",
"/content/FARM\n",
"Looking in links: https://download.pytorch.org/whl/torch_stable.html\n",
"Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 2)) (47.3.1)\n",
"Requirement already satisfied: wheel in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 3)) (0.34.2)\n",
"Collecting torch==1.5.0\n",
"\u001b[?25l Downloading https://download.pytorch.org/whl/cu92/torch-1.5.0%2Bcu92-cp36-cp36m-linux_x86_64.whl (603.7MB)\n",
"\u001b[K |████████████████████████████████| 603.7MB 30kB/s \n",
"\u001b[?25hRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 8)) (4.41.1)\n",
"Requirement already satisfied: boto3 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 10)) (1.14.5)\n",
"Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 12)) (2.23.0)\n",
"Requirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 14)) (1.4.1)\n",
"Requirement already satisfied: sklearn in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 15)) (0.0)\n",
"Collecting seqeval\n",
" Downloading https://files.pythonhosted.org/packages/34/91/068aca8d60ce56dd9ba4506850e876aba5e66a6f2f29aa223224b50df0de/seqeval-0.0.12.tar.gz\n",
"Collecting mlflow==1.0.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/01/ec/8c9448968d4662e8354b9c3a62e635f8929ed507a45af3d9fdb84be51270/mlflow-1.0.0-py3-none-any.whl (47.7MB)\n",
"\u001b[K |████████████████████████████████| 47.7MB 64kB/s \n",
"\u001b[?25hCollecting transformers==2.11.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/48/35/ad2c5b1b8f99feaaf9d7cdadaeef261f098c6e1a6a2935d4d07662a6b780/transformers-2.11.0-py3-none-any.whl (674kB)\n",
"\u001b[K |████████████████████████████████| 675kB 44.9MB/s \n",
"\u001b[?25hCollecting dotmap==1.3.0\n",
" Downloading https://files.pythonhosted.org/packages/fa/eb/ee5f0358a9e0ede90308d8f34e697e122f191c2702dc4f614eca7770b1eb/dotmap-1.3.0-py3-none-any.whl\n",
"Collecting Werkzeug==0.16.1\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/c2/e4/a859d2fe516f466642fa5c6054fd9646271f9da26b0cac0d2f37fc858c8f/Werkzeug-0.16.1-py2.py3-none-any.whl (327kB)\n",
"\u001b[K |████████████████████████████████| 327kB 52.6MB/s \n",
"\u001b[?25hRequirement already satisfied: flask in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 25)) (1.1.2)\n",
"Collecting flask-restplus\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/c2/a6/b17c848771f96ad039ad9e3ea275e842a16c39c4f3eb9f60ee330b20b6c2/flask_restplus-0.13.0-py2.py3-none-any.whl (2.5MB)\n",
"\u001b[K |████████████████████████████████| 2.5MB 50.5MB/s \n",
"\u001b[?25hCollecting flask-cors\n",
" Downloading https://files.pythonhosted.org/packages/78/38/e68b11daa5d613e3a91e4bf3da76c94ac9ee0d9cd515af9c1ab80d36f709/Flask_Cors-3.0.8-py2.py3-none-any.whl\n",
"Requirement already satisfied: dill in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 28)) (0.3.2)\n",
"Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch==1.5.0->-r requirements.txt (line 6)) (0.16.0)\n",
"Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torch==1.5.0->-r requirements.txt (line 6)) (1.18.5)\n",
"Requirement already satisfied: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from boto3->-r requirements.txt (line 10)) (0.10.0)\n",
"Requirement already satisfied: botocore<1.18.0,>=1.17.5 in /usr/local/lib/python3.6/dist-packages (from boto3->-r requirements.txt (line 10)) (1.17.5)\n",
"Requirement already satisfied: s3transfer<0.4.0,>=0.3.0 in /usr/local/lib/python3.6/dist-packages (from boto3->-r requirements.txt (line 10)) (0.3.3)\n",
"Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->-r requirements.txt (line 12)) (3.0.4)\n",
"Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->-r requirements.txt (line 12)) (1.24.3)\n",
"Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->-r requirements.txt (line 12)) (2.9)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->-r requirements.txt (line 12)) (2020.4.5.2)\n",
"Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from sklearn->-r requirements.txt (line 15)) (0.22.2.post1)\n",
"Requirement already satisfied: Keras>=2.2.4 in /usr/local/lib/python3.6/dist-packages (from seqeval->-r requirements.txt (line 17)) (2.3.1)\n",
"Collecting gitpython>=2.1.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/8c/f9/c315aa88e51fabdc08e91b333cfefb255aff04a2ee96d632c32cb19180c9/GitPython-3.1.3-py3-none-any.whl (451kB)\n",
"\u001b[K |████████████████████████████████| 460kB 51.7MB/s \n",
"\u001b[?25hRequirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (3.13)\n",
"Collecting docker>=3.6.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/2b/80/4eab8a38ff62c31716d07753980a7c5e6550b61096926384f01e742b4a4b/docker-4.2.1-py2.py3-none-any.whl (143kB)\n",
"\u001b[K |████████████████████████████████| 153kB 58.4MB/s \n",
"\u001b[?25hRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (1.12.0)\n",
"Collecting simplejson\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/98/87/a7b98aa9256c8843f92878966dc3d8d914c14aad97e2c5ce4798d5743e07/simplejson-3.17.0.tar.gz (83kB)\n",
"\u001b[K |████████████████████████████████| 92kB 715kB/s \n",
"\u001b[?25hRequirement already satisfied: python-dateutil in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (2.8.1)\n",
"Requirement already satisfied: sqlparse in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (0.3.1)\n",
"Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (1.0.5)\n",
"Collecting gunicorn\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/69/ca/926f7cd3a2014b16870086b2d0fdc84a9e49473c68a8dff8b57f7c156f43/gunicorn-20.0.4-py2.py3-none-any.whl (77kB)\n",
"\u001b[K |████████████████████████████████| 81kB 11.2MB/s \n",
"\u001b[?25hRequirement already satisfied: entrypoints in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (0.3)\n",
"Requirement already satisfied: sqlalchemy in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (1.3.17)\n",
"Collecting databricks-cli>=0.8.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/1e/57/5c2d6b83cb8753d12f548e89f91037632baa8289677c1b2ab2adf14bf6b2/databricks-cli-0.11.0.tar.gz (49kB)\n",
"\u001b[K |████████████████████████████████| 51kB 7.4MB/s \n",
"\u001b[?25hCollecting alembic\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/60/1e/cabc75a189de0fbb2841d0975243e59bde8b7822bacbb95008ac6fe9ad47/alembic-1.4.2.tar.gz (1.1MB)\n",
"\u001b[K |████████████████████████████████| 1.1MB 44.8MB/s \n",
"\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
" Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
" Preparing wheel metadata ... \u001b[?25l\u001b[?25hdone\n",
"Collecting querystring-parser\n",
" Downloading https://files.pythonhosted.org/packages/4a/fa/f54f5662e0eababf0c49e92fd94bf178888562c0e7b677c8941bbbcd1bd6/querystring_parser-1.2.4.tar.gz\n",
"Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (3.10.0)\n",
"Requirement already satisfied: cloudpickle in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (1.3.0)\n",
"Requirement already satisfied: click>=7.0 in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->-r requirements.txt (line 18)) (7.1.2)\n",
"Collecting sacremoses\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/7d/34/09d19aff26edcc8eb2a01bed8e98f13a1537005d31e95233fd48216eed10/sacremoses-0.0.43.tar.gz (883kB)\n",
"\u001b[K |████████████████████████████████| 890kB 55.3MB/s \n",
"\u001b[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->-r requirements.txt (line 20)) (20.4)\n",
"Requirement already satisfied: dataclasses; python_version < \"3.7\" in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->-r requirements.txt (line 20)) (0.7)\n",
"Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->-r requirements.txt (line 20)) (2019.12.20)\n",
"Collecting tokenizers==0.7.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/14/e5/a26eb4716523808bb0a799fcfdceb6ebf77a18169d9591b2f46a9adb87d9/tokenizers-0.7.0-cp36-cp36m-manylinux1_x86_64.whl (3.8MB)\n",
"\u001b[K |████████████████████████████████| 3.8MB 40.1MB/s \n",
"\u001b[?25hCollecting sentencepiece\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/d4/a4/d0a884c4300004a78cca907a6ff9a5e9fe4f090f5d95ab341c53d28cbc58/sentencepiece-0.1.91-cp36-cp36m-manylinux1_x86_64.whl (1.1MB)\n",
"\u001b[K |████████████████████████████████| 1.1MB 60.0MB/s \n",
"\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->-r requirements.txt (line 20)) (3.0.12)\n",
"Requirement already satisfied: Jinja2>=2.10.1 in /usr/local/lib/python3.6/dist-packages (from flask->-r requirements.txt (line 25)) (2.11.2)\n",
"Requirement already satisfied: itsdangerous>=0.24 in /usr/local/lib/python3.6/dist-packages (from flask->-r requirements.txt (line 25)) (1.1.0)\n",
"Requirement already satisfied: pytz in /usr/local/lib/python3.6/dist-packages (from flask-restplus->-r requirements.txt (line 26)) (2018.9)\n",
"Requirement already satisfied: jsonschema in /usr/local/lib/python3.6/dist-packages (from flask-restplus->-r requirements.txt (line 26)) (2.6.0)\n",
"Collecting aniso8601>=0.82\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/eb/e4/787e104b58eadc1a710738d4e418d7e599e4e778e52cb8e5d5ef6ddd5833/aniso8601-8.0.0-py2.py3-none-any.whl (43kB)\n",
"\u001b[K |████████████████████████████████| 51kB 7.4MB/s \n",
"\u001b[?25hRequirement already satisfied: docutils<0.16,>=0.10 in /usr/local/lib/python3.6/dist-packages (from botocore<1.18.0,>=1.17.5->boto3->-r requirements.txt (line 10)) (0.15.2)\n",
"Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->sklearn->-r requirements.txt (line 15)) (0.15.1)\n",
"Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from Keras>=2.2.4->seqeval->-r requirements.txt (line 17)) (2.10.0)\n",
"Requirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from Keras>=2.2.4->seqeval->-r requirements.txt (line 17)) (1.0.8)\n",
"Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from Keras>=2.2.4->seqeval->-r requirements.txt (line 17)) (1.1.2)\n",
"Collecting gitdb<5,>=4.0.1\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/48/11/d1800bca0a3bae820b84b7d813ad1eff15a48a64caea9c823fc8c1b119e8/gitdb-4.0.5-py3-none-any.whl (63kB)\n",
"\u001b[K |████████████████████████████████| 71kB 10.2MB/s \n",
"\u001b[?25hCollecting websocket-client>=0.32.0\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/4c/5f/f61b420143ed1c8dc69f9eaec5ff1ac36109d52c80de49d66e0c36c3dfdf/websocket_client-0.57.0-py2.py3-none-any.whl (200kB)\n",
"\u001b[K |████████████████████████████████| 204kB 60.4MB/s \n",
"\u001b[?25hRequirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from databricks-cli>=0.8.0->mlflow==1.0.0->-r requirements.txt (line 18)) (0.8.7)\n",
"Collecting python-editor>=0.3\n",
" Downloading https://files.pythonhosted.org/packages/c6/d3/201fc3abe391bbae6606e6f1d598c15d367033332bd54352b12f35513717/python_editor-1.0.4-py3-none-any.whl\n",
"Collecting Mako\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/a6/37/0e706200d22172eb8fa17d68a7ae22dec7631a0a92266634fb518a88a5b2/Mako-1.1.3-py2.py3-none-any.whl (75kB)\n",
"\u001b[K |████████████████████████████████| 81kB 11.1MB/s \n",
"\u001b[?25hRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->transformers==2.11.0->-r requirements.txt (line 20)) (2.4.7)\n",
"Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from Jinja2>=2.10.1->flask->-r requirements.txt (line 25)) (1.1.1)\n",
"Collecting smmap<4,>=3.0.1\n",
" Downloading https://files.pythonhosted.org/packages/b0/9a/4d409a6234eb940e6a78dfdfc66156e7522262f5f2fecca07dc55915952d/smmap-3.0.4-py2.py3-none-any.whl\n",
"Building wheels for collected packages: alembic\n",
" Building wheel for alembic (PEP 517) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for alembic: filename=alembic-1.4.2-cp36-none-any.whl size=159543 sha256=2ebc3397917780a420e3ab56ada5703726562f66fbe251bc17d5448aa12ed605\n",
" Stored in directory: /root/.cache/pip/wheels/1f/04/83/76023f7a4c14688c0b5c2682a96392cfdd3ee4449eaaa287ef\n",
"Successfully built alembic\n",
"Building wheels for collected packages: seqeval, simplejson, databricks-cli, querystring-parser, sacremoses\n",
" Building wheel for seqeval (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for seqeval: filename=seqeval-0.0.12-cp36-none-any.whl size=7424 sha256=ac89e7007d8c6388609f06b952297e6203a2d9f59096fc1352a3ba77ca89cffa\n",
" Stored in directory: /root/.cache/pip/wheels/4f/32/0a/df3b340a82583566975377d65e724895b3fad101a3fb729f68\n",
" Building wheel for simplejson (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for simplejson: filename=simplejson-3.17.0-cp36-cp36m-linux_x86_64.whl size=114201 sha256=08e2d1e0cd7646164c53c4d06e56395cc05f28b4da04b2fa6beaa885b98a36f1\n",
" Stored in directory: /root/.cache/pip/wheels/86/c0/83/dcd0339abb2640544bb8e0938aab2d069cef55e5647ce6e097\n",
" Building wheel for databricks-cli (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for databricks-cli: filename=databricks_cli-0.11.0-cp36-none-any.whl size=90300 sha256=d2f3e3c92f8977983d55b15a226de20f2ac18e0e96cca7d5668a59147167415c\n",
" Stored in directory: /root/.cache/pip/wheels/63/d0/4f/3deeca1f4c47a6aca7c2c6a6e2bf272391565dc86a7718a59b\n",
" Building wheel for querystring-parser (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for querystring-parser: filename=querystring_parser-1.2.4-cp36-none-any.whl size=7079 sha256=3d5f407aeefa7499b9b1831afd46949bc55cdc76251ce68504b62cd9a871dd07\n",
" Stored in directory: /root/.cache/pip/wheels/1e/41/34/23ebf5d1089a9aed847951e0ee375426eb4ad0a7079d88d41e\n",
" Building wheel for sacremoses (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for sacremoses: filename=sacremoses-0.0.43-cp36-none-any.whl size=893260 sha256=286b15df608ba8122d655e66cbee4b0e38c2d38374b605406b46542b437e170d\n",
" Stored in directory: /root/.cache/pip/wheels/29/3c/fd/7ce5c3f0666dab31a50123635e6fb5e19ceb42ce38d4e58f45\n",
"Successfully built seqeval simplejson databricks-cli querystring-parser sacremoses\n",
"\u001b[31mERROR: torchvision 0.6.1+cu101 has requirement torch==1.5.1, but you'll have torch 1.5.0+cu92 which is incompatible.\u001b[0m\n",
"Installing collected packages: torch, seqeval, smmap, gitdb, gitpython, websocket-client, docker, simplejson, gunicorn, databricks-cli, python-editor, Mako, alembic, querystring-parser, mlflow, sacremoses, tokenizers, sentencepiece, transformers, dotmap, Werkzeug, aniso8601, flask-restplus, flask-cors\n",
" Found existing installation: torch 1.5.1+cu101\n",
" Uninstalling torch-1.5.1+cu101:\n",
" Successfully uninstalled torch-1.5.1+cu101\n",
" Found existing installation: Werkzeug 1.0.1\n",
" Uninstalling Werkzeug-1.0.1:\n",
" Successfully uninstalled Werkzeug-1.0.1\n",
"Successfully installed Mako-1.1.3 Werkzeug-0.16.1 alembic-1.4.2 aniso8601-8.0.0 databricks-cli-0.11.0 docker-4.2.1 dotmap-1.3.0 flask-cors-3.0.8 flask-restplus-0.13.0 gitdb-4.0.5 gitpython-3.1.3 gunicorn-20.0.4 mlflow-1.0.0 python-editor-1.0.4 querystring-parser-1.2.4 sacremoses-0.0.43 sentencepiece-0.1.91 seqeval-0.0.12 simplejson-3.17.0 smmap-3.0.4 tokenizers-0.7.0 torch-1.5.0+cu92 transformers-2.11.0 websocket-client-0.57.0\n",
"Obtaining file:///content/FARM\n",
"Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (47.3.1)\n",
"Requirement already satisfied: wheel in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (0.34.2)\n",
"Requirement already satisfied: torch==1.5.0 in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (1.5.0+cu92)\n",
"Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (4.41.1)\n",
"Requirement already satisfied: boto3 in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (1.14.5)\n",
"Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (2.23.0)\n",
"Requirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (1.4.1)\n",
"Requirement already satisfied: sklearn in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (0.0)\n",
"Requirement already satisfied: seqeval in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (0.0.12)\n",
"Requirement already satisfied: mlflow==1.0.0 in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (1.0.0)\n",
"Requirement already satisfied: transformers==2.11.0 in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (2.11.0)\n",
"Requirement already satisfied: dotmap==1.3.0 in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (1.3.0)\n",
"Requirement already satisfied: Werkzeug==0.16.1 in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (0.16.1)\n",
"Requirement already satisfied: flask in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (1.1.2)\n",
"Requirement already satisfied: flask-restplus in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (0.13.0)\n",
"Requirement already satisfied: flask-cors in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (3.0.8)\n",
"Requirement already satisfied: dill in /usr/local/lib/python3.6/dist-packages (from farm==0.4.5) (0.3.2)\n",
"Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torch==1.5.0->farm==0.4.5) (1.18.5)\n",
"Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch==1.5.0->farm==0.4.5) (0.16.0)\n",
"Requirement already satisfied: s3transfer<0.4.0,>=0.3.0 in /usr/local/lib/python3.6/dist-packages (from boto3->farm==0.4.5) (0.3.3)\n",
"Requirement already satisfied: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from boto3->farm==0.4.5) (0.10.0)\n",
"Requirement already satisfied: botocore<1.18.0,>=1.17.5 in /usr/local/lib/python3.6/dist-packages (from boto3->farm==0.4.5) (1.17.5)\n",
"Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->farm==0.4.5) (2.9)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->farm==0.4.5) (2020.4.5.2)\n",
"Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->farm==0.4.5) (3.0.4)\n",
"Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->farm==0.4.5) (1.24.3)\n",
"Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from sklearn->farm==0.4.5) (0.22.2.post1)\n",
"Requirement already satisfied: Keras>=2.2.4 in /usr/local/lib/python3.6/dist-packages (from seqeval->farm==0.4.5) (2.3.1)\n",
"Requirement already satisfied: gunicorn in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (20.0.4)\n",
"Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (1.0.5)\n",
"Requirement already satisfied: gitpython>=2.1.0 in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (3.1.3)\n",
"Requirement already satisfied: click>=7.0 in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (7.1.2)\n",
"Requirement already satisfied: alembic in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (1.4.2)\n",
"Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (3.10.0)\n",
"Requirement already satisfied: simplejson in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (3.17.0)\n",
"Requirement already satisfied: sqlalchemy in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (1.3.17)\n",
"Requirement already satisfied: databricks-cli>=0.8.0 in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (0.11.0)\n",
"Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (1.12.0)\n",
"Requirement already satisfied: cloudpickle in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (1.3.0)\n",
"Requirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (3.13)\n",
"Requirement already satisfied: querystring-parser in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (1.2.4)\n",
"Requirement already satisfied: sqlparse in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (0.3.1)\n",
"Requirement already satisfied: docker>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (4.2.1)\n",
"Requirement already satisfied: entrypoints in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (0.3)\n",
"Requirement already satisfied: python-dateutil in /usr/local/lib/python3.6/dist-packages (from mlflow==1.0.0->farm==0.4.5) (2.8.1)\n",
"Requirement already satisfied: sacremoses in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->farm==0.4.5) (0.0.43)\n",
"Requirement already satisfied: sentencepiece in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->farm==0.4.5) (0.1.91)\n",
"Requirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->farm==0.4.5) (3.0.12)\n",
"Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->farm==0.4.5) (2019.12.20)\n",
"Requirement already satisfied: tokenizers==0.7.0 in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->farm==0.4.5) (0.7.0)\n",
"Requirement already satisfied: dataclasses; python_version < \"3.7\" in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->farm==0.4.5) (0.7)\n",
"Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers==2.11.0->farm==0.4.5) (20.4)\n",
"Requirement already satisfied: Jinja2>=2.10.1 in /usr/local/lib/python3.6/dist-packages (from flask->farm==0.4.5) (2.11.2)\n",
"Requirement already satisfied: itsdangerous>=0.24 in /usr/local/lib/python3.6/dist-packages (from flask->farm==0.4.5) (1.1.0)\n",
"Requirement already satisfied: pytz in /usr/local/lib/python3.6/dist-packages (from flask-restplus->farm==0.4.5) (2018.9)\n",
"Requirement already satisfied: aniso8601>=0.82 in /usr/local/lib/python3.6/dist-packages (from flask-restplus->farm==0.4.5) (8.0.0)\n",
"Requirement already satisfied: jsonschema in /usr/local/lib/python3.6/dist-packages (from flask-restplus->farm==0.4.5) (2.6.0)\n",
"Requirement already satisfied: docutils<0.16,>=0.10 in /usr/local/lib/python3.6/dist-packages (from botocore<1.18.0,>=1.17.5->boto3->farm==0.4.5) (0.15.2)\n",
"Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->sklearn->farm==0.4.5) (0.15.1)\n",
"Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from Keras>=2.2.4->seqeval->farm==0.4.5) (2.10.0)\n",
"Requirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from Keras>=2.2.4->seqeval->farm==0.4.5) (1.0.8)\n",
"Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from Keras>=2.2.4->seqeval->farm==0.4.5) (1.1.2)\n",
"Requirement already satisfied: gitdb<5,>=4.0.1 in /usr/local/lib/python3.6/dist-packages (from gitpython>=2.1.0->mlflow==1.0.0->farm==0.4.5) (4.0.5)\n",
"Requirement already satisfied: python-editor>=0.3 in /usr/local/lib/python3.6/dist-packages (from alembic->mlflow==1.0.0->farm==0.4.5) (1.0.4)\n",
"Requirement already satisfied: Mako in /usr/local/lib/python3.6/dist-packages (from alembic->mlflow==1.0.0->farm==0.4.5) (1.1.3)\n",
"Requirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from databricks-cli>=0.8.0->mlflow==1.0.0->farm==0.4.5) (0.8.7)\n",
"Requirement already satisfied: websocket-client>=0.32.0 in /usr/local/lib/python3.6/dist-packages (from docker>=3.6.0->mlflow==1.0.0->farm==0.4.5) (0.57.0)\n",
"Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->transformers==2.11.0->farm==0.4.5) (2.4.7)\n",
"Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from Jinja2>=2.10.1->flask->farm==0.4.5) (1.1.1)\n",
"Requirement already satisfied: smmap<4,>=3.0.1 in /usr/local/lib/python3.6/dist-packages (from gitdb<5,>=4.0.1->gitpython>=2.1.0->mlflow==1.0.0->farm==0.4.5) (3.0.4)\n",
"Installing collected packages: farm\n",
" Running setup.py develop for farm\n",
"Successfully installed farm\n"
],
"name": "stdout"
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "0cQ8sV0JzSSE",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000,
"referenced_widgets": [
"b4cbe7ea13b34187bb6110df44adb7ba",
"3597af482f8a43bcb9d30e0355653f08",
"5f825f1a7b064029b762f08dd59e8fff",
"cc88f701b9614221ac4a5c1fde7fe3d9",
"26e154c5290a44afa4ffb5b0befd6350",
"8c1883f21e604ffdaa8d7c946f8ddbbc",
"d07be38ceb4745bca42aa8acb079f570",
"2575193d16cd4505ad513ccf2c075e36",
"ec3cfe686ab64c71b784a09e9509dc72",
"ec04ca09d94d4e5aa694f1381df2b64d",
"fef7a2f25551440c8ff3d82b859c88d9",
"f72ee1fde53f45b09ca9f03d1c69edbf",
"72edf716621a418081cc3248bf438f95",
"c1958e64aaab4128a30500de43765760",
"2d8ec0ab88214fc39ab2c84e4a3da946",
"a85c936714f3407b9f73df6b4a31df68",
"bc31dc04151c43fbb1c1206628143337",
"1ae2bdfd14104caeb3abd6734f99982f",
"c16536c584b941beb2895878b7bc5cdb",
"6a5c1ce5004043d79630a7d5d27972e7",
"ae69dd9cf04440399791ae9b6991dd65",
"68e81308eea148558e9474705fd53608",
"b8a7cf667e734c88ad121d2f5eae263a",
"ab7ee10d36b24e3785af73bf53da24e6"
]
},
"outputId": "14fa8f6f-79ca-4342-b8fc-b4def3b7cc46"
},
"source": [
"# fmt: off\n",
"import logging\n",
"from pathlib import Path\n",
"\n",
"from farm.data_handler.data_silo import DataSilo\n",
"from farm.data_handler.processor import TextClassificationProcessor\n",
"from farm.modeling.optimization import initialize_optimizer\n",
"from farm.infer import Inferencer\n",
"from farm.modeling.adaptive_model import AdaptiveModel\n",
"from farm.modeling.language_model import LanguageModel\n",
"from farm.modeling.prediction_head import TextClassificationHead\n",
"from farm.modeling.tokenization import Tokenizer\n",
"from farm.train import Trainer, EarlyStopping\n",
"from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings\n",
"from sklearn.metrics import f1_score\n",
"from farm.evaluation.metrics import simple_accuracy, register_metrics\n",
"\n",
"def doc_classification_with_earlystopping():\n",
" #logging.basicConfig(\n",
" # format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n",
" # datefmt=\"%m/%d/%Y %H:%M:%S\",\n",
" # level=logging.INFO)\n",
"\n",
" #ml_logger = MLFlowLogger(tracking_uri=\"https://public-mlflow.deepset.ai/\")\n",
" # for local logging instead:\n",
" # ml_logger = MLFlowLogger(tracking_uri=\"logs\")\n",
" #ml_logger.init_experiment(experiment_name=\"Public_FARM\", run_name=\"DocClassification_ES_f1_1\")\n",
"\n",
" ##########################\n",
" ########## Settings\n",
" ##########################\n",
" set_all_seeds(seed=42)\n",
" use_amp = None\n",
" device, n_gpu = initialize_device_settings(use_cuda=True)\n",
" n_epochs = 20\n",
" batch_size = 32\n",
" evaluate_every = 100\n",
" lang_model = \"bert-base-german-cased\"\n",
" do_lower_case = False\n",
"\n",
" # 1.Create a tokenizer\n",
" tokenizer = Tokenizer.load(\n",
" pretrained_model_name_or_path=lang_model,\n",
" do_lower_case=do_lower_case)\n",
"\n",
" # 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset\n",
" # Here we load GermEval 2018 Data automaticaly if it is not available.\n",
" # GermEval 2018 only has train.tsv and test.tsv dataset - no dev.tsv\n",
"\n",
" # The processor wants to know the possible labels ...\n",
" label_list = [\"OTHER\", \"OFFENSE\"]\n",
"\n",
" # The evaluation on the dev-set can be done with one of the predefined metrics or with a\n",
" # metric defined as a function from (preds, labels) to a dict that contains all the actual\n",
" # metrics values. The function must get registered under a string name and the string name must\n",
" # be used.\n",
" def mymetrics(preds, labels):\n",
" acc = simple_accuracy(preds, labels)\n",
" f1other = f1_score(y_true=labels, y_pred=preds, pos_label=\"OTHER\")\n",
" f1offense = f1_score(y_true=labels, y_pred=preds, pos_label=\"OFFENSE\")\n",
" f1macro = f1_score(y_true=labels, y_pred=preds, average=\"macro\")\n",
" f1micro = f1_score(y_true=labels, y_pred=preds, average=\"macro\")\n",
" return {\"acc\": acc, \"f1_other\": f1other, \"f1_offense\": f1offense, \"f1_macro\": f1macro, \"f1_micro\": f1micro}\n",
" register_metrics('mymetrics', mymetrics)\n",
" metric = 'mymetrics'\n",
"\n",
" processor = TextClassificationProcessor(tokenizer=tokenizer,\n",
" max_seq_len=64,\n",
" data_dir=Path(\"../data/germeval18\"),\n",
" label_list=label_list,\n",
" metric=metric,\n",
" label_column_name=\"coarse_label\"\n",
" )\n",
"\n",
" # 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets\n",
" data_silo = DataSilo(\n",
" processor=processor,\n",
" batch_size=batch_size)\n",
"\n",
" # 4. Create an AdaptiveModel\n",
" # a) which consists of a pretrained language model as a basis\n",
" language_model = LanguageModel.load(lang_model)\n",
" # b) and a prediction head on top that is suited for our task => Text classification\n",
" prediction_head = TextClassificationHead(num_labels=len(label_list),\n",
" class_weights=data_silo.calculate_class_weights(task_name=\"text_classification\"))\n",
"\n",
"\n",
"\n",
" model = AdaptiveModel(\n",
" language_model=language_model,\n",
" prediction_heads=[prediction_head],\n",
" embeds_dropout_prob=0.2,\n",
" lm_output_types=[\"per_sequence\"],\n",
" device=device)\n",
"\n",
" # 5. Create an optimizer\n",
" model, optimizer, lr_schedule = initialize_optimizer(\n",
" model=model,\n",
" learning_rate=0.5e-5,\n",
" device=device,\n",
" n_batches=len(data_silo.loaders[\"train\"]),\n",
" n_epochs=n_epochs,\n",
" use_amp=use_amp)\n",
"\n",
" # 6. Feed everything to the Trainer, which keeps care of growing our model into powerful plant and evaluates it from time to time\n",
" # Also create an EarlyStopping instance and pass it on to the trainer\n",
"\n",
" # An early stopping instance can be used to save the model that performs best on the dev set\n",
" # according to some metric and stop training when no improvement is happening for some iterations.\n",
" earlystopping = EarlyStopping(\n",
" metric=\"f1_offense\", mode=\"max\", # use the metric from our own metrics function instead of loss\n",
" # metric=\"f1_macro\", mode=\"max\", # use f1_macro from the dev evaluator of the trainer\n",
" # metric=\"loss\", mode=\"min\", # use loss from the dev evaluator of the trainer\n",
" save_dir=Path(\"saved_models/bert-german-doc-tutorial-es\"), # where to save the best model\n",
" patience=5 # number of evaluations to wait for improvement before terminating the training\n",
" )\n",
"\n",
" trainer = Trainer(\n",
" model=model,\n",
" optimizer=optimizer,\n",
" data_silo=data_silo,\n",
" epochs=n_epochs,\n",
" n_gpu=n_gpu,\n",
" lr_schedule=lr_schedule,\n",
" evaluate_every=evaluate_every,\n",
" device=device,\n",
" early_stopping=earlystopping)\n",
"\n",
" # 7. Let it grow\n",
" trainer.train()\n",
"\n",
" # 8. Hooray! You have a model.\n",
" # NOTE: if early stopping is used, the best model has been stored already in the directory\n",
" # defined with the EarlyStopping instance\n",
" # The model we have at this moment is the model from the last training epoch that was carried\n",
" # out before early stopping terminated the training\n",
" save_dir = Path(\"saved_models/bert-german-doc-tutorial\")\n",
" model.save(save_dir)\n",
" processor.save(save_dir)\n",
"\n",
" # 9. Load it & harvest your fruits (Inference)\n",
" basic_texts = [\n",
" {\"text\": \"Schartau sagte dem Tagesspiegel, dass Fischer ein Idiot sei\"},\n",
" {\"text\": \"Martin Müller spielt Handball in Berlin\"},\n",
" ]\n",
"\n",
" # Load from the final epoch directory and apply\n",
" print(\"LOADING INFERENCER FROM FINAL MODEL DURING TRAINING\")\n",
" model = Inferencer.load(save_dir)\n",
" result = model.inference_from_dicts(dicts=basic_texts)\n",
" print(result)\n",
"\n",
" # Load from saved best model\n",
" print(\"LOADING INFERENCER FROM BEST MODEL DURING TRAINING\")\n",
" model = Inferencer.load(earlystopping.save_dir)\n",
" result = model.inference_from_dicts(dicts=basic_texts)\n",
" print(\"APPLICATION ON BEST MODEL\")\n",
" print(result)\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" doc_classification_with_earlystopping()\n",
"\n",
"# fmt: on\n"
],
"execution_count": 3,
"outputs": [
{
"output_type": "stream",
"text": [
"06/24/2020 19:59:45 - INFO - transformers.file_utils - PyTorch version 1.5.0+cu92 available.\n",
"06/24/2020 19:59:46 - INFO - transformers.file_utils - TensorFlow version 2.2.0 available.\n",
"06/24/2020 19:59:47 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, automatic mixed precision training: None\n",
"06/24/2020 19:59:47 - INFO - farm.modeling.tokenization - Loading tokenizer of type 'BertTokenizer'\n",
"06/24/2020 19:59:48 - INFO - filelock - Lock 140320089145072 acquired on /root/.cache/torch/transformers/da299cdd121a3d71e1626f2908dda0d02658f42e925a3d6abd8273ec08cf41a6.31ccc255fc2bad3578089a3997f16b286498ba78c0adc43b5bb2a3f9a0d2c85c.lock\n",
"06/24/2020 19:59:48 - INFO - transformers.file_utils - https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt not found in cache or force_download set to True, downloading to /root/.cache/torch/transformers/tmp58mv_1q1\n"
],
"name": "stderr"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b4cbe7ea13b34187bb6110df44adb7ba",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, description='Downloading', max=254728.0, style=ProgressStyle(descripti…"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"06/24/2020 19:59:50 - INFO - transformers.file_utils - storing https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt in cache at /root/.cache/torch/transformers/da299cdd121a3d71e1626f2908dda0d02658f42e925a3d6abd8273ec08cf41a6.31ccc255fc2bad3578089a3997f16b286498ba78c0adc43b5bb2a3f9a0d2c85c\n",
"06/24/2020 19:59:50 - INFO - transformers.file_utils - creating metadata file for /root/.cache/torch/transformers/da299cdd121a3d71e1626f2908dda0d02658f42e925a3d6abd8273ec08cf41a6.31ccc255fc2bad3578089a3997f16b286498ba78c0adc43b5bb2a3f9a0d2c85c\n",
"06/24/2020 19:59:50 - INFO - filelock - Lock 140320089145072 released on /root/.cache/torch/transformers/da299cdd121a3d71e1626f2908dda0d02658f42e925a3d6abd8273ec08cf41a6.31ccc255fc2bad3578089a3997f16b286498ba78c0adc43b5bb2a3f9a0d2c85c.lock\n",
"06/24/2020 19:59:50 - INFO - transformers.tokenization_utils - loading file https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt from cache at /root/.cache/torch/transformers/da299cdd121a3d71e1626f2908dda0d02658f42e925a3d6abd8273ec08cf41a6.31ccc255fc2bad3578089a3997f16b286498ba78c0adc43b5bb2a3f9a0d2c85c\n",
"06/24/2020 19:59:50 - INFO - farm.data_handler.data_silo - \n",
"Loading data into the data silo ... \n",
" ______\n",
" |o | !\n",
" __ |:`_|---'-.\n",
" |__|______.-/ _ \\-----.| \n",
" (o)(o)------'\\ _ / ( ) \n",
" \n",
"06/24/2020 19:59:50 - INFO - farm.data_handler.data_silo - Loading train set from: ../data/germeval18/train.tsv \n",
"06/24/2020 19:59:50 - INFO - farm.data_handler.utils - Couldn't find ../data/germeval18/train.tsv locally. Trying to download ...\n",
"06/24/2020 19:59:50 - INFO - farm.data_handler.utils - downloading and extracting file germeval18 to dir /content/data\n"
],
"name": "stderr"
},
{
"output_type": "stream",
"text": [
"\n"
],
"name": "stdout"
},
{
"output_type": "stream",
"text": [
"100%|██████████| 525101/525101 [00:01<00:00, 355981.08B/s]\n",
"06/24/2020 19:59:53 - INFO - farm.data_handler.data_silo - Got ya 1 parallel workers to convert 5009 dictionaries to pytorch datasets (chunksize = 1002)...\n",
"06/24/2020 19:59:53 - INFO - farm.data_handler.data_silo - 0 \n",
"06/24/2020 19:59:53 - INFO - farm.data_handler.data_silo - /w\\\n",
"06/24/2020 19:59:53 - INFO - farm.data_handler.data_silo - /'\\\n",
"06/24/2020 19:59:53 - INFO - farm.data_handler.data_silo - \n",
"Preprocessing Dataset ../data/germeval18/train.tsv: 0%| | 0/5009 [00:00<?, ? Dicts/s]06/24/2020 19:59:55 - INFO - farm.data_handler.processor - *** Show 2 random examples ***\n",
"06/24/2020 19:59:55 - INFO - farm.data_handler.processor - \n",
"\n",
" .--. _____ _ \n",
" .'_\\/_'. / ____| | | \n",
" '. /\\ .' | (___ __ _ _ __ ___ _ __ | | ___ \n",
" \"||\" \\___ \\ / _` | '_ ` _ \\| '_ \\| |/ _ \\ \n",
" || /\\ ____) | (_| | | | | | | |_) | | __/\n",
" /\\ ||//\\) |_____/ \\__,_|_| |_| |_| .__/|_|\\___|\n",
" (/\\||/ |_| \n",
"______\\||/___________________________________________ \n",
"\n",
"ID: 103-0\n",
"Clear Text: \n",
" \ttext: @Caro22one @CroqiSunshine Beim besten Willen, ich kann die nicht in Schutz nehmen. Kuenast hat viele Menschen, die am Existenzminimum leben auf hohe Geldstrafen verklagt. Wenn ein \"Flüchtling\" Amok läuft, fordert sie Täterschutz. Ich bekomme auch Hate im Internet; ignoriere solche Dumpfbacken...\n",
" \ttext_classification_label: OFFENSE\n",
"Tokenized: \n",
" \ttokens: ['@', 'Car', '##o', '##22', '##one', '@', 'Cro', '##q', '##i', '##Su', '##ns', '##hin', '##e', 'Beim', 'besten', 'Willen', ',', 'ich', 'kann', 'die', 'nicht', 'in', 'Schutz', 'nehmen', '.', 'Ku', '##ena', '##st', 'hat', 'viele', 'Menschen', ',', 'die', 'am', 'Existenz', '##min', '##imum', 'leben', 'auf', 'hohe', 'Geldstrafe', '##n', 'verk', '##lagt', '.', 'Wenn', 'ein', '\"', 'Flüchtling', '\"', 'Am', '##ok', 'läuft', ',', 'fordert', 'sie', 'Täter', '##schutz', '.', 'Ich', 'bekom', '##me']\n",
" \toffsets: [0, 1, 4, 5, 7, 11, 12, 15, 16, 17, 19, 21, 24, 26, 31, 38, 44, 47, 51, 56, 60, 66, 69, 76, 82, 84, 86, 89, 92, 96, 102, 110, 112, 116, 119, 127, 130, 135, 141, 145, 150, 160, 162, 166, 170, 172, 177, 181, 182, 192, 194, 196, 199, 204, 206, 214, 218, 223, 229, 231, 235, 240]\n",
" \tstart_of_word: [True, False, False, False, False, True, False, False, False, False, False, False, False, True, True, True, False, True, True, True, True, True, True, True, False, True, False, False, True, True, True, False, True, True, True, False, False, True, True, True, True, False, True, False, False, True, True, True, False, False, True, False, True, False, True, True, True, False, False, True, True, False]\n",
"Features: \n",
" \tinput_ids: [3, 26991, 1701, 26910, 5484, 1949, 26991, 9188, 26970, 26899, 13355, 12377, 889, 26897, 3793, 3875, 7468, 26918, 1169, 479, 30, 149, 50, 2045, 3513, 26914, 3412, 4749, 13, 193, 1480, 1075, 26918, 30, 235, 7650, 734, 22233, 2564, 115, 4154, 22071, 26898, 3975, 10114, 26914, 2111, 39, 26944, 10868, 26944, 570, 493, 6695, 26918, 8559, 213, 6438, 1244, 26914, 1671, 6860, 373, 4]\n",
" \tpadding_mask: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n",
" \tsegment_ids: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \ttext_classification_label_ids: [1]\n",
"_____________________________________________________\n",
"06/24/2020 19:59:55 - INFO - farm.data_handler.processor - \n",
"\n",
" .--. _____ _ \n",
" .'_\\/_'. / ____| | | \n",
" '. /\\ .' | (___ __ _ _ __ ___ _ __ | | ___ \n",
" \"||\" \\___ \\ / _` | '_ ` _ \\| '_ \\| |/ _ \\ \n",
" || /\\ ____) | (_| | | | | | | |_) | | __/\n",
" /\\ ||//\\) |_____/ \\__,_|_| |_| |_| .__/|_|\\___|\n",
" (/\\||/ |_| \n",
"______\\||/___________________________________________ \n",
"\n",
"ID: 805-0\n",
"Clear Text: \n",
" \ttext: @Ami66Stefan Eine Zeile aus dem Templer Codex. Lieber sterben als ohne Ehre Leben.\n",
" \ttext_classification_label: OTHER\n",
"Tokenized: \n",
" \ttokens: ['@', 'Am', '##i', '##66', '##Ste', '##fan', 'Eine', 'Zeile', 'aus', 'dem', 'Temp', '##ler', 'Code', '##x', '.', 'Lieber', 'sterben', 'als', 'ohne', 'Ehre', 'Leben', '.']\n",
" \toffsets: [0, 1, 3, 4, 6, 9, 13, 18, 24, 28, 32, 36, 40, 44, 45, 47, 54, 62, 66, 71, 76, 81]\n",
" \tstart_of_word: [True, False, False, False, False, False, True, True, True, True, True, False, True, False, False, True, True, True, True, True, True, False]\n",
"Features: \n",
" \tinput_ids: [3, 26991, 570, 26899, 7633, 14730, 3979, 917, 25325, 147, 128, 14236, 450, 20811, 26965, 26914, 23613, 13781, 153, 935, 19081, 826, 26914, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \tpadding_mask: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \tsegment_ids: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \ttext_classification_label_ids: [0]\n",
"_____________________________________________________\n",
"Preprocessing Dataset ../data/germeval18/train.tsv: 100%|██████████| 5009/5009 [00:05<00:00, 891.64 Dicts/s]\n",
"06/24/2020 19:59:59 - INFO - farm.data_handler.data_silo - Loading dev set as a slice of train set\n",
"06/24/2020 19:59:59 - INFO - farm.data_handler.data_silo - Took 1001 samples out of train set to create dev set (dev split is roughly 0.1)\n",
"06/24/2020 19:59:59 - INFO - farm.data_handler.data_silo - Loading test set from: ../data/germeval18/test.tsv\n",
"06/24/2020 19:59:59 - INFO - farm.data_handler.data_silo - Got ya 1 parallel workers to convert 3532 dictionaries to pytorch datasets (chunksize = 707)...\n",
"06/24/2020 19:59:59 - INFO - farm.data_handler.data_silo - 0 \n",
"06/24/2020 19:59:59 - INFO - farm.data_handler.data_silo - /w\\\n",
"06/24/2020 19:59:59 - INFO - farm.data_handler.data_silo - / \\\n",
"06/24/2020 19:59:59 - INFO - farm.data_handler.data_silo - \n",
"Preprocessing Dataset ../data/germeval18/test.tsv: 0%| | 0/3532 [00:00<?, ? Dicts/s]06/24/2020 20:00:00 - INFO - farm.data_handler.processor - *** Show 2 random examples ***\n",
"06/24/2020 20:00:00 - INFO - farm.data_handler.processor - \n",
"\n",
" .--. _____ _ \n",
" .'_\\/_'. / ____| | | \n",
" '. /\\ .' | (___ __ _ _ __ ___ _ __ | | ___ \n",
" \"||\" \\___ \\ / _` | '_ ` _ \\| '_ \\| |/ _ \\ \n",
" || /\\ ____) | (_| | | | | | | |_) | | __/\n",
" /\\ ||//\\) |_____/ \\__,_|_| |_| |_| .__/|_|\\___|\n",
" (/\\||/ |_| \n",
"______\\||/___________________________________________ \n",
"\n",
"ID: 624-0\n",
"Clear Text: \n",
" \ttext: @FrauSchmauke @_StultaMundi Üblicherweise werden Ehen von Frauen gewollt. Es entspricht ihrem Bedürfnis nach Romantik und Versorgung.\n",
" \ttext_classification_label: OFFENSE\n",
"Tokenized: \n",
" \ttokens: ['@', 'Frau', '##Schm', '##au', '##ke', '@', '_', 'Stu', '##lt', '##a', '##M', '##und', '##i', 'Üb', '##licherweise', 'werden', 'Ehe', '##n', 'von', 'Frauen', 'gewollt', '.', 'Es', 'entspricht', 'ihrem', 'Bedürfnis', 'nach', 'Roman', '##ti', '##k', 'und', 'Versorgung', '.']\n",
" \toffsets: [0, 1, 5, 9, 11, 14, 15, 16, 19, 21, 22, 23, 26, 28, 30, 42, 49, 52, 54, 58, 65, 72, 74, 77, 88, 94, 104, 109, 114, 116, 118, 122, 132]\n",
" \tstart_of_word: [True, False, False, False, False, True, False, False, False, False, False, False, False, True, False, True, True, False, True, True, True, False, True, True, True, True, True, True, False, False, True, True, False]\n",
"Features: \n",
" \tinput_ids: [3, 26991, 946, 21121, 162, 772, 26991, 26983, 3248, 362, 26903, 26929, 29, 26899, 3395, 8832, 266, 2307, 26898, 88, 1895, 24192, 26914, 482, 4430, 1461, 21719, 188, 3529, 15099, 26917, 42, 5564, 26914, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \tpadding_mask: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \tsegment_ids: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \ttext_classification_label_ids: [1]\n",
"_____________________________________________________\n",
"06/24/2020 20:00:00 - INFO - farm.data_handler.processor - \n",
"\n",
" .--. _____ _ \n",
" .'_\\/_'. / ____| | | \n",
" '. /\\ .' | (___ __ _ _ __ ___ _ __ | | ___ \n",
" \"||\" \\___ \\ / _` | '_ ` _ \\| '_ \\| |/ _ \\ \n",
" || /\\ ____) | (_| | | | | | | |_) | | __/\n",
" /\\ ||//\\) |_____/ \\__,_|_| |_| |_| .__/|_|\\___|\n",
" (/\\||/ |_| \n",
"______\\||/___________________________________________ \n",
"\n",
"ID: 198-0\n",
"Clear Text: \n",
" \ttext: Dieses mobile Stoffgefängnis für Frauen gehört weder zu unserer Werte- noch zu unserer demokratischen Gesellschaft.\n",
" \ttext_classification_label: OTHER\n",
"Tokenized: \n",
" \ttokens: ['Dieses', 'mobile', 'Stoff', '##gef', '##ängnis', 'für', 'Frauen', 'gehört', 'weder', 'zu', 'unserer', 'Werte', '-', 'noch', 'zu', 'unserer', 'demokratischen', 'Gesellschaft', '.']\n",
" \toffsets: [0, 7, 14, 19, 22, 29, 33, 40, 47, 53, 56, 64, 69, 71, 76, 79, 87, 102, 114]\n",
" \tstart_of_word: [True, True, True, False, False, True, True, True, True, True, True, True, False, True, True, True, True, True, False]\n",
"Features: \n",
" \tinput_ids: [3, 3598, 25970, 8697, 2289, 5987, 142, 1895, 1854, 3175, 81, 8174, 8388, 26935, 357, 81, 8174, 17276, 2183, 26914, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \tpadding_mask: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \tsegment_ids: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
" \ttext_classification_label_ids: [0]\n",
"_____________________________________________________\n",
"Preprocessing Dataset ../data/germeval18/test.tsv: 100%|██████████| 3532/3532 [00:03<00:00, 1000.56 Dicts/s]\n",
"06/24/2020 20:00:03 - INFO - farm.data_handler.data_silo - Examples in train: 4008\n",
"06/24/2020 20:00:03 - INFO - farm.data_handler.data_silo - Examples in dev : 1001\n",
"06/24/2020 20:00:03 - INFO - farm.data_handler.data_silo - Examples in test : 3532\n",
"06/24/2020 20:00:03 - INFO - farm.data_handler.data_silo - \n",
"06/24/2020 20:00:03 - INFO - farm.data_handler.data_silo - Longest sequence length observed after clipping: 64\n",
"06/24/2020 20:00:03 - INFO - farm.data_handler.data_silo - Average sequence length after clipping: 38.0753493013972\n",
"06/24/2020 20:00:03 - INFO - farm.data_handler.data_silo - Proportion clipped: 0.11676646706586827\n",
"06/24/2020 20:00:04 - INFO - filelock - Lock 140320089886616 acquired on /root/.cache/torch/transformers/e653e2fe0970d519c5a3b6c0286e1630ad2f0eade78f82b4916ec945d6f06d48.4d552623f46c71e8e0c1a0eb4e59da5816bca7958d32ae9290d9de4cee162c6b.lock\n",
"06/24/2020 20:00:04 - INFO - transformers.file_utils - https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json not found in cache or force_download set to True, downloading to /root/.cache/torch/transformers/tmpjaquakot\n"
],
"name": "stderr"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "ec3cfe686ab64c71b784a09e9509dc72",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, description='Downloading', max=433.0, style=ProgressStyle(description_…"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"06/24/2020 20:00:05 - INFO - transformers.file_utils - storing https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json in cache at /root/.cache/torch/transformers/e653e2fe0970d519c5a3b6c0286e1630ad2f0eade78f82b4916ec945d6f06d48.4d552623f46c71e8e0c1a0eb4e59da5816bca7958d32ae9290d9de4cee162c6b\n",
"06/24/2020 20:00:05 - INFO - transformers.file_utils - creating metadata file for /root/.cache/torch/transformers/e653e2fe0970d519c5a3b6c0286e1630ad2f0eade78f82b4916ec945d6f06d48.4d552623f46c71e8e0c1a0eb4e59da5816bca7958d32ae9290d9de4cee162c6b\n",
"06/24/2020 20:00:05 - INFO - filelock - Lock 140320089886616 released on /root/.cache/torch/transformers/e653e2fe0970d519c5a3b6c0286e1630ad2f0eade78f82b4916ec945d6f06d48.4d552623f46c71e8e0c1a0eb4e59da5816bca7958d32ae9290d9de4cee162c6b.lock\n"
],
"name": "stderr"
},
{
"output_type": "stream",
"text": [
"\n"
],
"name": "stdout"
},
{
"output_type": "stream",
"text": [
"06/24/2020 20:00:05 - INFO - filelock - Lock 140320089885552 acquired on /root/.cache/torch/transformers/c7032c48d440a7822edfa87a49d7312f4f51efe30635a118e8e9e47dd5cda59e.4e5eda3a0f09b32a0b7d1a9185034da1b3506d5c5b0c6880a7ca0122ab5eef2e.lock\n",
"06/24/2020 20:00:05 - INFO - transformers.file_utils - https://cdn.huggingface.co/bert-base-german-cased-pytorch_model.bin not found in cache or force_download set to True, downloading to /root/.cache/torch/transformers/tmpxi1bjrrx\n"
],
"name": "stderr"
},
{
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "bc31dc04151c43fbb1c1206628143337",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
"HBox(children=(FloatProgress(value=0.0, description='Downloading', max=438869143.0, style=ProgressStyle(descri…"
]
},
"metadata": {
"tags": []
}
},
{
"output_type": "stream",
"text": [
"06/24/2020 20:00:46 - INFO - transformers.file_utils - storing https://cdn.huggingface.co/bert-base-german-cased-pytorch_model.bin in cache at /root/.cache/torch/transformers/c7032c48d440a7822edfa87a49d7312f4f51efe30635a118e8e9e47dd5cda59e.4e5eda3a0f09b32a0b7d1a9185034da1b3506d5c5b0c6880a7ca0122ab5eef2e\n",
"06/24/2020 20:00:46 - INFO - transformers.file_utils - creating metadata file for /root/.cache/torch/transformers/c7032c48d440a7822edfa87a49d7312f4f51efe30635a118e8e9e47dd5cda59e.4e5eda3a0f09b32a0b7d1a9185034da1b3506d5c5b0c6880a7ca0122ab5eef2e\n",
"06/24/2020 20:00:46 - INFO - filelock - Lock 140320089885552 released on /root/.cache/torch/transformers/c7032c48d440a7822edfa87a49d7312f4f51efe30635a118e8e9e47dd5cda59e.4e5eda3a0f09b32a0b7d1a9185034da1b3506d5c5b0c6880a7ca0122ab5eef2e.lock\n",
"06/24/2020 20:00:46 - INFO - transformers.modeling_utils - loading weights file https://cdn.huggingface.co/bert-base-german-cased-pytorch_model.bin from cache at /root/.cache/torch/transformers/c7032c48d440a7822edfa87a49d7312f4f51efe30635a118e8e9e47dd5cda59e.4e5eda3a0f09b32a0b7d1a9185034da1b3506d5c5b0c6880a7ca0122ab5eef2e\n"
],
"name": "stderr"
},
{
"output_type": "stream",
"text": [
"\n"
],
"name": "stdout"
},
{
"output_type": "stream",
"text": [
"06/24/2020 20:00:49 - INFO - farm.modeling.language_model - Automatically detected language from language model name: german\n",
"06/24/2020 20:00:49 - INFO - farm.modeling.prediction_head - Prediction head initialized with size [768, 2]\n",
"06/24/2020 20:00:49 - INFO - farm.modeling.prediction_head - Using class weights for task 'text_classification': [0.75489455 1.4807976 ]\n",
"06/24/2020 20:00:53 - INFO - farm.modeling.optimization - Loading optimizer `TransformersAdamW`: '{'correct_bias': False, 'weight_decay': 0.01, 'lr': 5e-06}'\n",
"06/24/2020 20:00:53 - INFO - farm.modeling.optimization - Using scheduler 'get_linear_schedule_with_warmup'\n",
"06/24/2020 20:00:53 - INFO - farm.modeling.optimization - Loading schedule `get_linear_schedule_with_warmup`: '{'num_warmup_steps': 252.0, 'num_training_steps': 2520}'\n",
"06/24/2020 20:00:53 - INFO - farm.train - \n",
" \n",
"\n",
" &&& && & && _____ _ \n",
" && &\\/&\\|& ()|/ @, && / ____| (_) \n",
" &\\/(/&/&||/& /_/)_&/_& | | __ _ __ _____ ___ _ __ __ _ \n",
" &() &\\/&|()|/&\\/ '%\" & () | | |_ | '__/ _ \\ \\ /\\ / / | '_ \\ / _` |\n",
" &_\\_&&_\\ |& |&&/&__%_/_& && | |__| | | | (_) \\ V V /| | | | | (_| |\n",
"&& && & &| &| /& & % ()& /&& \\_____|_| \\___/ \\_/\\_/ |_|_| |_|\\__, |\n",
" ()&_---()&\\&\\|&&-&&--%---()~ __/ |\n",
" && \\||| |___/\n",
" |||\n",
" |||\n",
" |||\n",
" , -=-~ .-^- _\n",
" `\n",
"\n",
"Train epoch 0/19 (Cur. train loss: 0.5841): 79%|███████▉ | 100/126 [00:37<00:10, 2.56it/s]\n",
"Evaluating: 100%|██████████| 32/32 [00:04<00:00, 7.59it/s]\n",
"06/24/2020 20:01:35 - INFO - farm.eval - \n",
"\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"***************************************************\n",
"***** EVALUATION | DEV SET | AFTER 100 BATCHES *****\n",
"***************************************************\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"06/24/2020 20:01:35 - INFO - farm.eval - \n",
" _________ text_classification _________\n",
"06/24/2020 20:01:35 - INFO - farm.eval - loss: 0.6368016953711266\n",
"06/24/2020 20:01:35 - INFO - farm.eval - task_name: text_classification\n",
"06/24/2020 20:01:35 - INFO - farm.eval - acc: {'acc': 0.7142857142857143}\n",
"06/24/2020 20:01:35 - INFO - farm.eval - f1_other: 0.7712\n",
"06/24/2020 20:01:35 - INFO - farm.eval - f1_offense: 0.6196808510638299\n",
"06/24/2020 20:01:35 - INFO - farm.eval - f1_macro: 0.6954404255319149\n",
"06/24/2020 20:01:35 - INFO - farm.eval - f1_micro: 0.6954404255319149\n",
"06/24/2020 20:01:35 - INFO - farm.eval - report: \n",
" precision recall f1-score support\n",
"\n",
" OTHER 0.8253 0.7237 0.7712 666\n",
" OFFENSE 0.5588 0.6955 0.6197 335\n",
"\n",
" accuracy 0.7143 1001\n",
" macro avg 0.6920 0.7096 0.6954 1001\n",
"weighted avg 0.7361 0.7143 0.7205 1001\n",
"\n",
"06/24/2020 20:01:35 - INFO - farm.train - Saving current best model to saved_models/bert-german-doc-tutorial-es, eval=0.6196808510638299\n",
"Train epoch 0/19 (Cur. train loss: 0.5787): 100%|██████████| 126/126 [00:53<00:00, 2.35it/s]\n",
"Train epoch 1/19 (Cur. train loss: 0.4429): 59%|█████▊ | 74/126 [00:30<00:22, 2.35it/s]\n",
"Evaluating: 100%|██████████| 32/32 [00:04<00:00, 6.77it/s]\n",
"06/24/2020 20:02:22 - INFO - farm.eval - \n",
"\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"***************************************************\n",
"***** EVALUATION | DEV SET | AFTER 200 BATCHES *****\n",
"***************************************************\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"06/24/2020 20:02:22 - INFO - farm.eval - \n",
" _________ text_classification _________\n",
"06/24/2020 20:02:22 - INFO - farm.eval - loss: 0.45832151037591556\n",
"06/24/2020 20:02:22 - INFO - farm.eval - task_name: text_classification\n",
"06/24/2020 20:02:22 - INFO - farm.eval - acc: {'acc': 0.7602397602397603}\n",
"06/24/2020 20:02:22 - INFO - farm.eval - f1_other: 0.8013245033112583\n",
"06/24/2020 20:02:22 - INFO - farm.eval - f1_offense: 0.6977329974811084\n",
"06/24/2020 20:02:23 - INFO - farm.eval - f1_macro: 0.7495287503961834\n",
"06/24/2020 20:02:23 - INFO - farm.eval - f1_micro: 0.7495287503961834\n",
"06/24/2020 20:02:23 - INFO - farm.eval - report: \n",
" precision recall f1-score support\n",
"\n",
" OTHER 0.8930 0.7267 0.8013 666\n",
" OFFENSE 0.6035 0.8269 0.6977 335\n",
"\n",
" accuracy 0.7602 1001\n",
" macro avg 0.7482 0.7768 0.7495 1001\n",
"weighted avg 0.7961 0.7602 0.7667 1001\n",
"\n",
"06/24/2020 20:02:23 - INFO - farm.train - Saving current best model to saved_models/bert-german-doc-tutorial-es, eval=0.6977329974811084\n",
"Train epoch 1/19 (Cur. train loss: 0.2481): 100%|██████████| 126/126 [00:58<00:00, 2.17it/s]\n",
"Train epoch 2/19 (Cur. train loss: 0.1989): 38%|███▊ | 48/126 [00:19<00:32, 2.42it/s]\n",
"Evaluating: 100%|██████████| 32/32 [00:04<00:00, 7.07it/s]\n",
"06/24/2020 20:03:10 - INFO - farm.eval - \n",
"\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"***************************************************\n",
"***** EVALUATION | DEV SET | AFTER 300 BATCHES *****\n",
"***************************************************\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"06/24/2020 20:03:10 - INFO - farm.eval - \n",
" _________ text_classification _________\n",
"06/24/2020 20:03:10 - INFO - farm.eval - loss: 0.45097471212411855\n",
"06/24/2020 20:03:10 - INFO - farm.eval - task_name: text_classification\n",
"06/24/2020 20:03:10 - INFO - farm.eval - acc: {'acc': 0.7892107892107892}\n",
"06/24/2020 20:03:10 - INFO - farm.eval - f1_other: 0.8334648776637728\n",
"06/24/2020 20:03:10 - INFO - farm.eval - f1_offense: 0.7129251700680271\n",
"06/24/2020 20:03:10 - INFO - farm.eval - f1_macro: 0.7731950238659\n",
"06/24/2020 20:03:10 - INFO - farm.eval - f1_micro: 0.7731950238659\n",
"06/24/2020 20:03:10 - INFO - farm.eval - report: \n",
" precision recall f1-score support\n",
"\n",
" OTHER 0.8785 0.7928 0.8335 666\n",
" OFFENSE 0.6550 0.7821 0.7129 335\n",
"\n",
" accuracy 0.7892 1001\n",
" macro avg 0.7668 0.7874 0.7732 1001\n",
"weighted avg 0.8037 0.7892 0.7931 1001\n",
"\n",
"06/24/2020 20:03:10 - INFO - farm.train - Saving current best model to saved_models/bert-german-doc-tutorial-es, eval=0.7129251700680271\n",
"Train epoch 2/19 (Cur. train loss: 0.0268): 100%|██████████| 126/126 [00:57<00:00, 2.18it/s]\n",
"Train epoch 3/19 (Cur. train loss: 0.1104): 17%|█▋ | 22/126 [00:09<00:43, 2.40it/s]\n",
"Evaluating: 100%|██████████| 32/32 [00:04<00:00, 7.06it/s]\n",
"06/24/2020 20:03:57 - INFO - farm.eval - \n",
"\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"***************************************************\n",
"***** EVALUATION | DEV SET | AFTER 400 BATCHES *****\n",
"***************************************************\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"06/24/2020 20:03:57 - INFO - farm.eval - \n",
" _________ text_classification _________\n",
"06/24/2020 20:03:57 - INFO - farm.eval - loss: 0.7927843431135515\n",
"06/24/2020 20:03:57 - INFO - farm.eval - task_name: text_classification\n",
"06/24/2020 20:03:57 - INFO - farm.eval - acc: {'acc': 0.8061938061938062}\n",
"06/24/2020 20:03:57 - INFO - farm.eval - f1_other: 0.8596237337192475\n",
"06/24/2020 20:03:57 - INFO - farm.eval - f1_offense: 0.6870967741935484\n",
"06/24/2020 20:03:57 - INFO - farm.eval - f1_macro: 0.773360253956398\n",
"06/24/2020 20:03:57 - INFO - farm.eval - f1_micro: 0.773360253956398\n",
"06/24/2020 20:03:57 - INFO - farm.eval - report: \n",
" precision recall f1-score support\n",
"\n",
" OTHER 0.8296 0.8919 0.8596 666\n",
" OFFENSE 0.7474 0.6358 0.6871 335\n",
"\n",
" accuracy 0.8062 1001\n",
" macro avg 0.7885 0.7639 0.7734 1001\n",
"weighted avg 0.8021 0.8062 0.8019 1001\n",
"\n",
"Train epoch 3/19 (Cur. train loss: 0.0047): 97%|█████████▋| 122/126 [00:55<00:01, 2.43it/s]\n",
"Evaluating: 100%|██████████| 32/32 [00:04<00:00, 7.04it/s]\n",
"06/24/2020 20:04:43 - INFO - farm.eval - \n",
"\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"***************************************************\n",
"***** EVALUATION | DEV SET | AFTER 500 BATCHES *****\n",
"***************************************************\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"06/24/2020 20:04:43 - INFO - farm.eval - \n",
" _________ text_classification _________\n",
"06/24/2020 20:04:43 - INFO - farm.eval - loss: 0.9255302364414151\n",
"06/24/2020 20:04:43 - INFO - farm.eval - task_name: text_classification\n",
"06/24/2020 20:04:43 - INFO - farm.eval - acc: {'acc': 0.7932067932067932}\n",
"06/24/2020 20:04:43 - INFO - farm.eval - f1_other: 0.8487947406866325\n",
"06/24/2020 20:04:43 - INFO - farm.eval - f1_offense: 0.6729857819905213\n",
"06/24/2020 20:04:43 - INFO - farm.eval - f1_macro: 0.7608902613385768\n",
"06/24/2020 20:04:43 - INFO - farm.eval - f1_micro: 0.7608902613385768\n",
"06/24/2020 20:04:43 - INFO - farm.eval - report: \n",
" precision recall f1-score support\n",
"\n",
" OTHER 0.8265 0.8724 0.8488 666\n",
" OFFENSE 0.7148 0.6358 0.6730 335\n",
"\n",
" accuracy 0.7932 1001\n",
" macro avg 0.7706 0.7541 0.7609 1001\n",
"weighted avg 0.7891 0.7932 0.7900 1001\n",
"\n",
"Train epoch 3/19 (Cur. train loss: 0.0020): 100%|██████████| 126/126 [01:01<00:00, 2.06it/s]\n",
"Train epoch 4/19 (Cur. train loss: 0.0047): 76%|███████▌ | 96/126 [00:39<00:12, 2.42it/s]\n",
"Evaluating: 100%|██████████| 32/32 [00:04<00:00, 7.00it/s]\n",
"06/24/2020 20:05:29 - INFO - farm.eval - \n",
"\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"***************************************************\n",
"***** EVALUATION | DEV SET | AFTER 600 BATCHES *****\n",
"***************************************************\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"06/24/2020 20:05:29 - INFO - farm.eval - \n",
" _________ text_classification _________\n",
"06/24/2020 20:05:29 - INFO - farm.eval - loss: 1.0570004422228771\n",
"06/24/2020 20:05:29 - INFO - farm.eval - task_name: text_classification\n",
"06/24/2020 20:05:29 - INFO - farm.eval - acc: {'acc': 0.8011988011988012}\n",
"06/24/2020 20:05:29 - INFO - farm.eval - f1_other: 0.8556925308194343\n",
"06/24/2020 20:05:29 - INFO - farm.eval - f1_offense: 0.680577849117175\n",
"06/24/2020 20:05:29 - INFO - farm.eval - f1_macro: 0.7681351899683047\n",
"06/24/2020 20:05:29 - INFO - farm.eval - f1_micro: 0.7681351899683047\n",
"06/24/2020 20:05:29 - INFO - farm.eval - report: \n",
" precision recall f1-score support\n",
"\n",
" OTHER 0.8275 0.8859 0.8557 666\n",
" OFFENSE 0.7361 0.6328 0.6806 335\n",
"\n",
" accuracy 0.8012 1001\n",
" macro avg 0.7818 0.7594 0.7681 1001\n",
"weighted avg 0.7969 0.8012 0.7971 1001\n",
"\n",
"Train epoch 4/19 (Cur. train loss: 0.0006): 100%|██████████| 126/126 [00:56<00:00, 2.22it/s]\n",
"Train epoch 5/19 (Cur. train loss: 0.0015): 56%|█████▌ | 70/126 [00:29<00:23, 2.42it/s]\n",
"Evaluating: 100%|██████████| 32/32 [00:04<00:00, 7.03it/s]\n",
"06/24/2020 20:06:15 - INFO - farm.eval - \n",
"\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"***************************************************\n",
"***** EVALUATION | DEV SET | AFTER 700 BATCHES *****\n",
"***************************************************\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"06/24/2020 20:06:15 - INFO - farm.eval - \n",
" _________ text_classification _________\n",
"06/24/2020 20:06:15 - INFO - farm.eval - loss: 1.5737790613622218\n",
"06/24/2020 20:06:15 - INFO - farm.eval - task_name: text_classification\n",
"06/24/2020 20:06:15 - INFO - farm.eval - acc: {'acc': 0.7852147852147852}\n",
"06/24/2020 20:06:15 - INFO - farm.eval - f1_other: 0.8528405201916496\n",
"06/24/2020 20:06:15 - INFO - farm.eval - f1_offense: 0.6025878003696857\n",
"06/24/2020 20:06:15 - INFO - farm.eval - f1_macro: 0.7277141602806676\n",
"06/24/2020 20:06:15 - INFO - farm.eval - f1_micro: 0.7277141602806676\n",
"06/24/2020 20:06:15 - INFO - farm.eval - report: \n",
" precision recall f1-score support\n",
"\n",
" OTHER 0.7836 0.9354 0.8528 666\n",
" OFFENSE 0.7913 0.4866 0.6026 335\n",
"\n",
" accuracy 0.7852 1001\n",
" macro avg 0.7875 0.7110 0.7277 1001\n",
"weighted avg 0.7862 0.7852 0.7691 1001\n",
"\n",
"Train epoch 5/19 (Cur. train loss: 0.0003): 100%|██████████| 126/126 [00:56<00:00, 2.23it/s]\n",
"Train epoch 6/19 (Cur. train loss: 0.0068): 35%|███▍ | 44/126 [00:18<00:33, 2.41it/s]\n",
"Evaluating: 100%|██████████| 32/32 [00:04<00:00, 7.01it/s]\n",
"06/24/2020 20:07:00 - INFO - farm.eval - \n",
"\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"***************************************************\n",
"***** EVALUATION | DEV SET | AFTER 800 BATCHES *****\n",
"***************************************************\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"06/24/2020 20:07:00 - INFO - farm.eval - \n",
" _________ text_classification _________\n",
"06/24/2020 20:07:00 - INFO - farm.eval - loss: 1.29132007194923\n",
"06/24/2020 20:07:00 - INFO - farm.eval - task_name: text_classification\n",
"06/24/2020 20:07:00 - INFO - farm.eval - acc: {'acc': 0.8041958041958042}\n",
"06/24/2020 20:07:00 - INFO - farm.eval - f1_other: 0.8569343065693431\n",
"06/24/2020 20:07:00 - INFO - farm.eval - f1_offense: 0.689873417721519\n",
"06/24/2020 20:07:00 - INFO - farm.eval - f1_macro: 0.7734038621454311\n",
"06/24/2020 20:07:00 - INFO - farm.eval - f1_micro: 0.7734038621454311\n",
"06/24/2020 20:07:00 - INFO - farm.eval - report: \n",
" precision recall f1-score support\n",
"\n",
" OTHER 0.8338 0.8814 0.8569 666\n",
" OFFENSE 0.7340 0.6507 0.6899 335\n",
"\n",
" accuracy 0.8042 1001\n",
" macro avg 0.7839 0.7661 0.7734 1001\n",
"weighted avg 0.8004 0.8042 0.8010 1001\n",
"\n",
"Train epoch 6/19 (Cur. train loss: 0.0003): 100%|██████████| 126/126 [00:56<00:00, 2.23it/s]\n",
"Train epoch 7/19 (Cur. train loss: 0.0229): 14%|█▍ | 18/126 [00:07<00:44, 2.42it/s]\n",
"Evaluating: 100%|██████████| 32/32 [00:04<00:00, 7.05it/s]\n",
"06/24/2020 20:07:46 - INFO - farm.eval - \n",
"\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"***************************************************\n",
"***** EVALUATION | DEV SET | AFTER 900 BATCHES *****\n",
"***************************************************\n",
"\\\\|// \\\\|// \\\\|// \\\\|// \\\\|//\n",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"06/24/2020 20:07:46 - INFO - farm.eval - \n",
" _________ text_classification _________\n",
"06/24/2020 20:07:46 - INFO - farm.eval - loss: 1.3771745775129411\n",
"06/24/2020 20:07:46 - INFO - farm.eval - task_name: text_classification\n",
"06/24/2020 20:07:46 - INFO - farm.eval - acc: {'acc': 0.7932067932067932}\n",
"06/24/2020 20:07:46 - INFO - farm.eval - f1_other: 0.8507570295602019\n",
"06/24/2020 20:07:46 - INFO - farm.eval - f1_offense: 0.6634146341463414\n",
"06/24/2020 20:07:46 - INFO - farm.eval - f1_macro: 0.7570858318532716\n",
"06/24/2020 20:07:46 - INFO - farm.eval - f1_micro: 0.7570858318532716\n",
"06/24/2020 20:07:46 - INFO - farm.eval - report: \n",
" precision recall f1-score support\n",
"\n",
" OTHER 0.8183 0.8859 0.8508 666\n",
" OFFENSE 0.7286 0.6090 0.6634 335\n",
"\n",
" accuracy 0.7932 1001\n",
" macro avg 0.7734 0.7474 0.7571 1001\n",
"weighted avg 0.7883 0.7932 0.7881 1001\n",
"\n",
"06/24/2020 20:07:46 - INFO - farm.train - STOPPING EARLY AT EPOCH 7, STEP 18, EVALUATION 9\n",
"06/24/2020 20:07:46 - INFO - farm.train - Restoring best model so far from saved_models/bert-german-doc-tutorial-es\n",
"06/24/2020 20:07:46 - INFO - transformers.modeling_utils - loading weights file saved_models/bert-german-doc-tutorial-es/language_model.bin from cache at saved_models/bert-german-doc-tutorial-es/language_model.bin\n",
"06/24/2020 20:07:48 - INFO - farm.modeling.adaptive_model - Found files for loading 1 prediction heads\n",
"06/24/2020 20:07:48 - WARNING - farm.modeling.prediction_head - `layer_dims` will be deprecated in future releases\n",
"06/24/2020 20:07:48 - INFO - farm.modeling.prediction_head - Prediction head initialized with size [768, 2]\n",
"06/24/2020 20:07:48 - INFO - farm.modeling.prediction_head - Loading prediction head from saved_models/bert-german-doc-tutorial-es/prediction_head_0.bin\n"
],
"name": "stderr"
},
{
"output_type": "error",
"ename": "RuntimeError",
"evalue": "ignored",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-3-66a7ddd46d8d>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 161\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m__name__\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"__main__\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 162\u001b[0;31m \u001b[0mdoc_classification_with_earlystopping\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 163\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 164\u001b[0m \u001b[0;31m# fmt: on\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-3-66a7ddd46d8d>\u001b[0m in \u001b[0;36mdoc_classification_with_earlystopping\u001b[0;34m()\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 129\u001b[0m \u001b[0;31m# 7. Let it grow\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 130\u001b[0;31m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 131\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 132\u001b[0m \u001b[0;31m# 8. Hooray! You have a model.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/content/FARM/farm/train.py\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 352\u001b[0m \u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Restoring best model so far from {}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mearly_stopping\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave_dir\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 353\u001b[0m \u001b[0mlm_name\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlanguage_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 354\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mAdaptiveModel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mearly_stopping\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave_dir\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlm_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlm_name\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 355\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconnect_heads_with_processor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata_silo\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprocessor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtasks\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrequire_labels\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 356\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/content/FARM/farm/modeling/adaptive_model.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(cls, load_dir, device, strict, lm_name, processor)\u001b[0m\n\u001b[1;32m 321\u001b[0m \u001b[0mph_output_type\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 322\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mconfig_file\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mph_config_files\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 323\u001b[0;31m \u001b[0mhead\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mPredictionHead\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig_file\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstrict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstrict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 324\u001b[0m \u001b[0mprediction_heads\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhead\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 325\u001b[0m \u001b[0mph_output_type\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhead\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mph_output_type\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/content/FARM/farm/modeling/prediction_head.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(cls, config_file, strict, load_weights)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0mmodel_file\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_model_file\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig_file\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mconfig_file\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Loading prediction head from {}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel_file\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 118\u001b[0;31m \u001b[0mprediction_head\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_state_dict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel_file\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmap_location\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"cpu\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstrict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstrict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 119\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mprediction_head\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mload_state_dict\u001b[0;34m(self, state_dict, strict)\u001b[0m\n\u001b[1;32m 845\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0merror_msgs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 846\u001b[0m raise RuntimeError('Error(s) in loading state_dict for {}:\\n\\t{}'.format(\n\u001b[0;32m--> 847\u001b[0;31m self.__class__.__name__, \"\\n\\t\".join(error_msgs)))\n\u001b[0m\u001b[1;32m 848\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_IncompatibleKeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmissing_keys\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0munexpected_keys\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 849\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mRuntimeError\u001b[0m: Error(s) in loading state_dict for TextClassificationHead:\n\tUnexpected key(s) in state_dict: \"loss_fct.weight\". "
]
}
]
}
]
}