|
73 | 73 | "import time\n", |
74 | 74 | "import re\n", |
75 | 75 | "import sagemaker \n", |
76 | | - "role = get_execution_role()\n", |
| 76 | + "role = sagemaker.get_execution_role()\n", |
77 | 77 | "\n", |
78 | 78 | "# Now let's define the S3 bucket we'll use for the remainder of this example.\n", |
79 | 79 | "\n", |
|
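The hunk above resolves the notebook's execution role through the SageMaker SDK. A minimal sketch of the usual role-and-bucket setup, assuming the session default bucket rather than whatever bucket the notebook actually configures:

```python
# Minimal sketch (SageMaker Python SDK v2): execution role, region, and a
# working bucket. default_bucket() is an assumption -- the notebook may point
# at its own bucket instead.
import boto3
import sagemaker

role = sagemaker.get_execution_role()      # IAM role attached to the notebook instance
session = sagemaker.Session()
bucket = session.default_bucket()          # e.g. sagemaker-<region>-<account-id>
region = boto3.Session().region_name
print(role, bucket, region)
```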
340 | 340 | }, |
341 | 341 | "outputs": [], |
342 | 342 | "source": [ |
343 | | - "from sagemaker.amazon.amazon_estimator import get_image_uri\n", |
344 | | - "container = get_image_uri(boto3.Session().region_name, 'xgboost')" |
| 343 | + "from sagemaker.amazon.amazon_estimator import image_uris\n", |
| 344 | + "container = image_uris.retrieve(region=boto3.Session().region_name, framework='xgboost', version='1')" |
345 | 345 | ] |
346 | 346 | }, |
347 | 347 | { |
|
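`get_image_uri` was removed in SageMaker Python SDK v2; `sagemaker.image_uris.retrieve` is the documented replacement. A sketch of the call, where the version string is an assumption ('1' selects the legacy built-in XGBoost image; newer strings such as '1.5-1' select the open-source container):

```python
# SDK v2 replacement for get_image_uri. The version string is an assumption:
# '1' maps to the legacy built-in XGBoost image; strings like '1.5-1' select
# the open-source XGBoost container instead.
import boto3
from sagemaker import image_uris

region = boto3.Session().region_name
container = image_uris.retrieve(framework='xgboost', region=region, version='1')
print(container)
```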
645 | 645 | " \n", |
646 | 646 | " for offset in range(0, items, batch_size):\n", |
647 | 647 | " if offset+batch_size < items:\n", |
648 | | - " datav = data.iloc[offset:(offset+batch_size),:].as_matrix()\n", |
| 648 | + " datav = data.iloc[offset:(offset+batch_size),:].values\n", |
649 | 649 | " results = do_predict(datav, endpoint_name, content_type)\n", |
650 | 650 | " arrs.extend(results)\n", |
651 | 651 | " else:\n", |
652 | | - " datav = data.iloc[offset:items,:].as_matrix()\n", |
| 652 | + " datav = data.iloc[offset:items,:].values\n", |
653 | 653 | " arrs.extend(do_predict(datav, endpoint_name, content_type))\n", |
654 | 654 | " sys.stdout.write('.')\n", |
655 | 655 | " return(arrs)" |
|
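`DataFrame.as_matrix()` was removed in pandas 1.0. `.values` still works on these numeric frames, and `.to_numpy()` is the replacement pandas itself recommends; a small sketch of the slice conversion used in the loop above:

```python
# Slice-to-ndarray conversion without the removed .as_matrix(). On plain
# numeric frames .to_numpy() and .values return the same array.
import numpy as np
import pandas as pd

data = pd.DataFrame(np.arange(20).reshape(5, 4))
batch = data.iloc[0:2, :].to_numpy()   # equivalent to .values here
assert (batch == data.iloc[0:2, :].values).all()
print(batch.shape)                     # (2, 4)
```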
739 | 739 | "data_test = pd.read_csv(\"formatted_test.csv\", sep=',', header=None) \n", |
740 | 740 | "data_val = pd.read_csv(\"formatted_val.csv\", sep=',', header=None) \n", |
741 | 741 | "\n", |
742 | | - "train_y = data_train.iloc[:,0].as_matrix();\n", |
743 | | - "train_X = data_train.iloc[:,1:].as_matrix();\n", |
| 742 | + "train_y = data_train.iloc[:,0].values;\n", |
| 743 | + "train_X = data_train.iloc[:,1:].values;\n", |
744 | 744 | "\n", |
745 | | - "val_y = data_val.iloc[:,0].as_matrix();\n", |
746 | | - "val_X = data_val.iloc[:,1:].as_matrix();\n", |
| 745 | + "val_y = data_val.iloc[:,0].values;\n", |
| 746 | + "val_X = data_val.iloc[:,1:].values;\n", |
747 | 747 | "\n", |
748 | | - "test_y = data_test.iloc[:,0].as_matrix();\n", |
749 | | - "test_X = data_test.iloc[:,1:].as_matrix();\n" |
| 748 | + "test_y = data_test.iloc[:,0].values;\n", |
| 749 | + "test_X = data_test.iloc[:,1:].values;\n" |
750 | 750 | ] |
751 | 751 | }, |
752 | 752 | { |
|
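The label/feature split above is repeated verbatim for the train, validation, and test frames. A hypothetical helper (the name `split_xy` and the `formatted_train.csv` filename are assumptions, not the notebook's code) that factors it out:

```python
# Hypothetical helper (not in the notebook): column 0 is the label, the rest
# are features. The train filename is assumed by analogy with the other two.
import pandas as pd

def split_xy(frame: pd.DataFrame):
    y = frame.iloc[:, 0].to_numpy()
    X = frame.iloc[:, 1:].to_numpy()
    return X, y

train_X, train_y = split_xy(pd.read_csv("formatted_train.csv", sep=',', header=None))
val_X, val_y = split_xy(pd.read_csv("formatted_val.csv", sep=',', header=None))
test_X, test_y = split_xy(pd.read_csv("formatted_test.csv", sep=',', header=None))
```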
826 | 826 | }, |
827 | 827 | "outputs": [], |
828 | 828 | "source": [ |
829 | | - "from sagemaker.amazon.amazon_estimator import get_image_uri\n", |
830 | | - "container = get_image_uri(boto3.Session().region_name, 'linear-learner')" |
| 829 | + "from sagemaker.amazon.amazon_estimator import image_uris\n", |
| 830 | + "container = image_uris.retrieve(region=boto3.Session().region_name, framework='linear-learner', version='1')" |
831 | 831 | ] |
832 | 832 | }, |
833 | 833 | { |
|
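For context, the retrieved linear-learner URI is typically passed straight to an `Estimator`; the instance type, output path, and hyperparameters below are placeholders rather than the notebook's actual settings:

```python
# How the linear-learner URI is typically consumed (instance type, output path,
# and hyperparameters are placeholders, not the notebook's values).
import boto3
import sagemaker
from sagemaker import image_uris
from sagemaker.estimator import Estimator

region = boto3.Session().region_name
container = image_uris.retrieve(framework='linear-learner', region=region, version='1')

linear = Estimator(
    image_uri=container,
    role=sagemaker.get_execution_role(),
    instance_count=1,
    instance_type='ml.m5.xlarge',
    output_path='s3://<bucket>/<prefix>/output',   # placeholder path
    sagemaker_session=sagemaker.Session(),
)
linear.set_hyperparameters(predictor_type='regressor', mini_batch_size=100)
```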
1097 | 1097 | " \n", |
1098 | 1098 | " for offset in range(0, items, batch_size):\n", |
1099 | 1099 | " if offset+batch_size < items:\n", |
1100 | | - " datav = data.iloc[offset:(offset+batch_size),:].as_matrix()\n", |
| 1100 | + " datav = data.iloc[offset:(offset+batch_size),:].values\n", |
1101 | 1101 | " results = do_predict_linear(datav, endpoint_name, content_type)\n", |
1102 | 1102 | " arrs.extend(results)\n", |
1103 | 1103 | " else:\n", |
1104 | | - " datav = data.iloc[offset:items,:].as_matrix()\n", |
| 1104 | + " datav = data.iloc[offset:items,:].values\n", |
1105 | 1105 | " arrs.extend(do_predict_linear(datav, endpoint_name, content_type))\n", |
1106 | 1106 | " sys.stdout.write('.')\n", |
1107 | 1107 | " return(arrs)" |
|
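The loop above still special-cases the final partial batch. An alternative sketch (not the notebook's code) lets `numpy.array_split` handle the remainder; it assumes a predict callable with the same `(array, endpoint_name, content_type)` signature as `do_predict_linear`:

```python
# Alternative batching sketch (not the notebook's code). np.array_split yields
# chunks of roughly batch_size rows, so no separate branch is needed for the
# final partial batch. predict_fn stands in for do_predict_linear above.
import sys
import numpy as np

def batch_predict(data, batch_size, endpoint_name, content_type, predict_fn):
    arrs = []
    n_chunks = max(1, len(data) // batch_size)
    for chunk in np.array_split(data.to_numpy(), n_chunks):
        arrs.extend(predict_fn(chunk, endpoint_name, content_type))
        sys.stdout.write('.')
    return arrs
```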