18
18
from typing import Dict , List , NamedTuple , Optional , Sequence , Tuple , Union
19
19
20
20
from google .api_core import operation
21
+ from google .api_core import exceptions as api_exceptions
21
22
from google .auth import credentials as auth_credentials
22
23
24
+ from google .cloud import aiplatform
23
25
from google .cloud .aiplatform import base
24
26
from google .cloud .aiplatform import compat
25
27
from google .cloud .aiplatform import explain
@@ -119,6 +121,33 @@ def __init__(
119
121
credentials = credentials ,
120
122
)
121
123
124
@property
def traffic_split(self) -> Dict[str, int]:
    """Mapping from a DeployedModel's ID to the percentage of this
    Endpoint's traffic that should be forwarded to that DeployedModel.

    A DeployedModel whose ID is not listed in the map receives no traffic.

    The traffic percentage values must add up to 100, or the map must be
    empty if the Endpoint is to not accept any traffic at a moment.
    """
    # Refresh the cached resource so the split reflects the live Endpoint.
    self._sync_gca_resource()
    split = self._gca_resource.traffic_split
    # Copy into a plain dict so callers cannot mutate the cached resource.
    return {deployed_model_id: percent for deployed_model_id, percent in split.items()}
136
+
137
@property
def network(self) -> Optional[str]:
    """The full name of the Google Compute Engine
    [network](https://cloud.google.com/vpc/docs/vpc#networks) to which this
    Endpoint should be peered.

    Takes the format `projects/{project}/global/networks/{network}`. Where
    {project} is a project number, as in `12345`, and {network} is a network name.

    Private services access must already be configured for the network. If left
    unspecified, the Endpoint is not peered with any network.
    """
    # Default to None so an unset field satisfies the Optional[str]
    # contract instead of raising AttributeError.
    return getattr(self._gca_resource, "network", None)
150
+
122
151
@classmethod
123
152
def create (
124
153
cls ,
@@ -1211,12 +1240,13 @@ class Model(base.VertexAiResourceNounWithFutureManager):
1211
1240
_delete_method = "delete_model"
1212
1241
1213
1242
@property
def uri(self) -> Optional[str]:
    """Path to the directory containing the Model artifact and any of its
    supporting files. Not present for AutoML Models."""
    artifact_uri = self._gca_resource.artifact_uri
    # Proto string fields default to ""; surface that to callers as None.
    return artifact_uri if artifact_uri else None
1217
1247
1218
1248
@property
def description(self) -> str:
    """Description of the model."""
    model_description = self._gca_resource.description
    return model_description
1222
1252
@@ -1240,6 +1270,98 @@ def supported_export_formats(
1240
1270
for export_format in self ._gca_resource .supported_export_formats
1241
1271
}
1242
1272
1273
@property
def supported_deployment_resources_types(
    self,
) -> List["aiplatform.gapic.Model.DeploymentResourcesType"]:
    """List of deployment resource types accepted for this Model.

    When this Model is deployed, its prediction resources are described by
    the `prediction_resources` field of the objects returned by
    `Endpoint.list_models()`. Because not all Models support all resource
    configuration types, the configuration types this Model supports are
    listed here.

    If no configuration types are listed, the Model cannot be
    deployed to an `Endpoint` and does not support online predictions
    (`Endpoint.predict()` or `Endpoint.explain()`). Such a Model can serve
    predictions by using a `BatchPredictionJob`, if it has at least one entry
    each in `Model.supported_input_storage_formats` and
    `Model.supported_output_storage_formats`."""
    resource_types = self._gca_resource.supported_deployment_resources_types
    # Copy into a plain list so callers cannot mutate the cached resource.
    return list(resource_types)
1292
+
1293
@property
def supported_input_storage_formats(self) -> List[str]:
    """The formats this Model supports in the `input_config` field of a
    `BatchPredictionJob`. If `Model.predict_schemata.instance_schema_uri`
    exists, the instances should be given as per that schema.

    [Read the docs for more on batch prediction formats](https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions#batch_request_input)

    If this Model doesn't support any of these formats it means it cannot be
    used with a `BatchPredictionJob`. However, if it has
    `supported_deployment_resources_types`, it could serve online predictions
    by using `Endpoint.predict()` or `Endpoint.explain()`.
    """
    # Unpack into a fresh list so callers cannot mutate the cached resource.
    return [*self._gca_resource.supported_input_storage_formats]
1307
+
1308
@property
def supported_output_storage_formats(self) -> List[str]:
    """The formats this Model supports in the `output_config` field of a
    `BatchPredictionJob`.

    If both `Model.predict_schemata.instance_schema_uri` and
    `Model.predict_schemata.prediction_schema_uri` exist, the predictions
    are returned together with their instances. In other words, the
    prediction has the original instance data first, followed by the actual
    prediction content (as per the schema).

    [Read the docs for more on batch prediction formats](https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions)

    If this Model doesn't support any of these formats it means it cannot be
    used with a `BatchPredictionJob`. However, if it has
    `supported_deployment_resources_types`, it could serve online predictions
    by using `Endpoint.predict()` or `Endpoint.explain()`.
    """
    output_formats = self._gca_resource.supported_output_storage_formats
    # Copy into a plain list so callers cannot mutate the cached resource.
    return list(output_formats)
1327
+
1328
@property
def predict_schemata(self) -> Optional["aiplatform.gapic.PredictSchemata"]:
    """The schemata that describe formats of the Model's predictions and
    explanations, if available."""
    # Default to None so an unset field satisfies the Optional contract
    # instead of raising AttributeError.
    return getattr(self._gca_resource, "predict_schemata", None)
1333
+
1334
@property
def training_job(self) -> Optional["aiplatform.training_jobs._TrainingJob"]:
    """The TrainingJob that uploaded this Model, if any.

    Returns:
        The training job that produced this Model, or None when the Model
        was uploaded directly rather than created by a training pipeline.

    Raises:
        api_core.exceptions.NotFound: If the Model's training job resource
            cannot be found on the Vertex service.
    """
    # Default to None so a resource without the field set is treated the
    # same as an empty pipeline name.
    job_name = getattr(self._gca_resource, "training_pipeline", None)

    # Directly-uploaded Models have no associated training pipeline.
    if not job_name:
        return None

    try:
        return aiplatform.training_jobs._TrainingJob._get_and_return_subclass(
            resource_name=job_name,
            project=self.project,
            location=self.location,
            credentials=self.credentials,
        )
    except api_exceptions.NotFound as err:
        # Chain the original exception so the underlying service error
        # (status details, request info) is preserved for debugging.
        raise api_exceptions.NotFound(
            f"The training job used to create this model could not be found: {job_name}"
        ) from err
1358
+
1359
@property
def container_spec(self) -> Optional["aiplatform.gapic.ModelContainerSpec"]:
    """The specification of the container that is to be used when deploying
    this Model. Not present for AutoML Models."""
    # Default to None so an unset field satisfies the Optional contract
    # instead of raising AttributeError.
    return getattr(self._gca_resource, "container_spec", None)
1364
+
1243
1365
def __init__ (
1244
1366
self ,
1245
1367
model_name : str ,
0 commit comments