Commit afc2b3f
feat(e2e-tests): model deployment and inference of test models from python tests to bdd (#7007)
* refactor feature to use scenario outline
* refactor inference with do methods
* preparation for default models in test cases
* trace for watcher events
* default model inference
* fix errors in godog repeating twice
* include body response when http errors occur
* tidy up model responses
* more models for basic inferencing
* suppress some of the comments
1 parent cc5183b commit afc2b3f

6 files changed: +156 −64 lines
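For orientation, this is roughly how godog feature suites are wired into `go test` with tag filters like the ones this commit adds. The `godog.TestSuite` and `godog.Options` API is real; the initializer name, paths, and tag choice below are illustrative assumptions, not the repo's actual harness:

```go
package steps_test

import (
	"testing"

	"github.com/cucumber/godog"
)

// Sketch: run only the tagged feature files from `go test`.
func TestFeatures(t *testing.T) {
	suite := godog.TestSuite{
		ScenarioInitializer: InitializeScenario, // hypothetical initializer
		Options: &godog.Options{
			Format:   "pretty",
			Tags:     "@Inference", // one of the tags added in this commit
			Paths:    []string{"features/model"},
			TestingT: t, // fail the Go test on feature failures
		},
	}
	if suite.Run() != 0 {
		t.Fatal("feature suite returned a non-zero status")
	}
}
```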

tests/integration/godog/features/model/deployment.feature

Lines changed: 16 additions & 22 deletions
```diff
@@ -4,16 +4,28 @@ Feature: Model deployment
   As a model user
   I need to create a Model resource and verify it is deployed
 
-  Scenario: Success - Load a model
-    Given I have an "iris" model
+  Scenario Outline: Success - Load a <model> model
+    Given I have an "<model>" model
     When the model is applied
     Then the model should eventually become Ready
 
+    Examples:
+      | model         |
+      | iris          |
+      | income-xgb    |
+      | mnist-onnx    |
+      | income-lgb    |
+      | wine          |
+      | mnist-pytorch |
+      | tfsimple1     |
+
 
-  Scenario: Success - Load a model again
+  Scenario: Success - Load a model and expect status model available
     Given I have an "iris" model
     When the model is applied
-    Then the model should eventually become Ready
+    And the model eventually becomes Ready
+    Then the model status message should eventually be "ModelAvailable"
+
 
   Scenario: Load a specific model
     Given I deploy model spec with timeout "10s":
@@ -31,22 +43,4 @@ Feature: Model deployment
     """
     Then the model should eventually become Ready
 
-  Scenario: Success - Load a model and expect status model available
-    Given I have an "iris" model
-    When the model is applied
-    And the model eventually becomes Ready
-    Then the model status message should eventually be "ModelAvailable"
-
-  Scenario: Success - Load a model with min replicas
-    Given I have an "iris" model
-    And the model has "1" min replicas
-    When the model is applied
-    Then the model should eventually become Ready
-
-  # todo: change model type
-  Scenario: Success - Load a big model
-    Given I have an "iris" model
-    When the model is applied
-    Then the model should eventually become Ready
-
```
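Each Gherkin step in the outline above maps to a Go step definition registered on godog's `ScenarioContext`. A minimal sketch of what those bindings could look like — `ctx.Step` is godog's real registration API, while the `Model` receiver and handler names are assumptions:

```go
// Sketch of step bindings for the scenario outline; names are illustrative.
func InitializeScenario(ctx *godog.ScenarioContext) {
	m := &Model{} // hypothetical per-scenario state

	// The <model> placeholder from the Examples table arrives as the
	// captured regex group on each run of the outline.
	ctx.Step(`^I have an "([^"]*)" model$`, m.iHaveAModel)
	ctx.Step(`^the model is applied$`, m.theModelIsApplied)
	ctx.Step(`^the model should eventually become Ready$`, m.modelEventuallyReady)
}
```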

Lines changed: 20 additions & 16 deletions
```diff
@@ -1,16 +1,20 @@
-#@ModelInference @Models @Inference
-#Feature Basic model inferencing
-#
-# Background:
-#   Given a clean test namespace
-#
-# Scenario: Model can serve prediction
-#   Given I have an "iris" model
-#   And the model is applied
-#   And the model eventually becomes Ready
-#   When I send a prediction request with payload:
-#   """
-#   { "inputs": [1.0, 2.0, 3.0] }
-#   """
-#   Then the response status should be 200
-#   And the response body should contain "predictions"
+@ModelInference @Models @Inference @Functional
+Feature: Basic model inferencing
+
+  Scenario Outline: Success - Inference for <model> model
+    Given I have an "<model>" model
+    When the model is applied
+    Then the model should eventually become Ready
+    When I send a valid HTTP inference request with timeout "20s"
+    Then expect http response status code "200"
+    When I send a valid gRPC inference request with timeout "20s"
+
+    Examples:
+      | model         |
+      | iris          |
+#      | income-xgb    | having errors with GRPC
+#      | mnist-onnx    |
+#      | income-lgb    | having errors with response
+      | tfsimple1     |
+      | wine          |
+#      | mnist-pytorch | having errors with response
```
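The "valid HTTP/gRPC inference request" steps imply a table of known-good payloads keyed by model type — the `testModels` map that `infer.go` consults below. A sketch of the assumed shape; only `ValidInferenceRequest` is visible in the diff, and the payload shown is an illustrative V2 (Open Inference Protocol) request for the iris classifier:

```go
// Assumed shape of the per-model test data table.
type TestModel struct {
	ValidInferenceRequest string // known-good V2 request body for this model
}

var testModels = map[string]TestModel{
	"iris": {
		// One 4-feature row in the V2 inference request format served
		// at /v2/models/{name}/infer. Input name and values are illustrative.
		ValidInferenceRequest: `{
			"inputs": [{
				"name": "predict",
				"shape": [1, 4],
				"datatype": "FP32",
				"data": [[1.0, 2.0, 3.0, 4.0]]
			}]
		}`,
	},
}
```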

tests/integration/godog/k8sclient/watcher_store.go

Lines changed: 1 addition & 1 deletion
```diff
@@ -90,7 +90,7 @@ func (s *WatcherStore) Start() {
 		if err != nil {
 			s.logger.WithError(err).Error("failed to access model watcher")
 		} else {
-			s.logger.Debugf("new model watch event with name: %s on namespace: %s", accessor.GetName(), accessor.GetNamespace())
+			s.logger.WithField("event", event).Tracef("new model watch event with name: %s on namespace: %s", accessor.GetName(), accessor.GetNamespace())
 		}
 
 		if event.Object == nil {
```
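Moving the per-event log from `Debugf` to `Tracef` keeps watch chatter out of ordinary debug output; with logrus (which the `WithField`/`Tracef` calls indicate), it only surfaces when the level is raised explicitly. A small sketch, assuming the logger is built centrally:

```go
import "github.com/sirupsen/logrus"

// newTestLogger builds a logger that also shows the per-event watch logs.
// Sketch only; the repo's actual logger construction may differ.
func newTestLogger() *logrus.Logger {
	logger := logrus.New()
	// Trace sits below Debug in logrus severity, so watch events stay
	// hidden by default and appear only when opted into like this.
	logger.SetLevel(logrus.TraceLevel)
	return logger
}
```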

tests/integration/godog/steps/infer.go

Lines changed: 57 additions & 13 deletions
```diff
@@ -25,24 +25,48 @@ import (
 	"google.golang.org/grpc/metadata"
 )
 
-func (i *inference) sendHTTPModelInferenceRequest(ctx context.Context, model string, payload *godog.DocString) error {
-	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
-		fmt.Sprintf("%s://%s:%d/v2/models/%s/infer", httpScheme(i.ssl), i.host, i.httpPort, model), strings.NewReader(payload.Content))
+func (i *inference) doHTTPModelInferenceRequest(ctx context.Context, modelName, body string) error {
+	url := fmt.Sprintf(
+		"%s://%s:%d/v2/models/%s/infer",
+		httpScheme(i.ssl),
+		i.host,
+		i.httpPort,
+		modelName,
+	)
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, strings.NewReader(body))
 	if err != nil {
 		return fmt.Errorf("could not create http request: %w", err)
 	}
+
 	req.Header.Add("Content-Type", "application/json")
 	req.Header.Add("Host", "seldon-mesh.inference.seldon")
-	req.Header.Add("Seldon-model", model)
+	req.Header.Add("Seldon-model", modelName)
 
 	resp, err := i.http.Do(req)
 	if err != nil {
 		return fmt.Errorf("could not send http request: %w", err)
 	}
+
 	i.lastHTTPResponse = resp
 	return nil
 }
 
+// Used from steps that pass an explicit payload (DocString)
+func (i *inference) sendHTTPModelInferenceRequest(ctx context.Context, model string, payload *godog.DocString) error {
+	return i.doHTTPModelInferenceRequest(ctx, model, payload.Content)
+}
+
+// Used from steps that work from a *Model and testModels table
+func (i *inference) sendHTTPModelInferenceRequestFromModel(ctx context.Context, m *Model) error {
+	testModel, ok := testModels[m.modelType]
+	if !ok {
+		return fmt.Errorf("could not find test model %s", m.model.Name)
+	}
+
+	return i.doHTTPModelInferenceRequest(ctx, m.modelName, testModel.ValidInferenceRequest)
+}
+
 func httpScheme(useSSL bool) string {
 	if useSSL {
 		return "https"
@@ -51,20 +75,35 @@ func httpScheme(useSSL bool) string {
 }
 
 func (i *inference) sendGRPCModelInferenceRequest(ctx context.Context, model string, payload *godog.DocString) error {
-	var msg *v2_dataplane.ModelInferRequest
-	if err := json.Unmarshal([]byte(payload.Content), &msg); err != nil {
+	return i.doGRPCModelInferenceRequest(ctx, model, payload.Content)
+}
+
+func (i *inference) sendGRPCModelInferenceRequestFromModel(ctx context.Context, m *Model) error {
+	testModel, ok := testModels[m.modelType]
+	if !ok {
+		return fmt.Errorf("could not find test model %s", m.model.Name)
+	}
+	return i.doGRPCModelInferenceRequest(ctx, m.modelName, testModel.ValidInferenceRequest)
+}
+
+func (i *inference) doGRPCModelInferenceRequest(
+	ctx context.Context,
+	model string,
+	payload string,
+) error {
+	var req v2_dataplane.ModelInferRequest
+	if err := json.Unmarshal([]byte(payload), &req); err != nil {
 		return fmt.Errorf("could not unmarshal gRPC json payload: %w", err)
 	}
-	msg.ModelName = model
+	req.ModelName = model
 
 	md := metadata.Pairs("seldon-model", model)
-	ctx = metadata.NewOutgoingContext(context.Background(), md)
-	resp, err := i.grpc.ModelInfer(ctx, msg)
-	if err != nil {
-		i.lastGRPCResponse.err = err
-	}
+	ctx = metadata.NewOutgoingContext(ctx, md)
+
+	resp, err := i.grpc.ModelInfer(ctx, &req)
 
 	i.lastGRPCResponse.response = resp
+	i.lastGRPCResponse.err = err
 	return nil
 }
 
@@ -196,7 +235,12 @@ func (i *inference) httpRespCheckStatus(status int) error {
 		return errors.New("no http response found")
 	}
 	if status != i.lastHTTPResponse.StatusCode {
-		return fmt.Errorf("expected http response status code %d, got %d", status, i.lastHTTPResponse.StatusCode)
+		body, err := io.ReadAll(i.lastHTTPResponse.Body)
+		if err != nil {
+			return fmt.Errorf("expected http response status code %d, got %d", status, i.lastHTTPResponse.StatusCode)
+		}
+		return fmt.Errorf("expected http response status code %d, got %d with body: %s", status, i.lastHTTPResponse.StatusCode, body)
+
 	}
 	return nil
 }
```
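Two behavioral fixes in the gRPC path are easy to miss: the outgoing context is now derived from the step's `ctx` instead of `context.Background()`, so the feature file's `timeout "20s"` actually bounds the call, and both `response` and `err` are unconditionally overwritten, so a stale error from an earlier request cannot leak into a later assertion. A small sketch of the deadline point, with an illustrative timeout value:

```go
import (
	"context"
	"time"

	"google.golang.org/grpc/metadata"
)

// Sketch: because the metadata context derives from parent rather than
// context.Background(), any deadline the step layer set on parent is
// inherited by the gRPC call made with the returned context.
func outgoingCtx(parent context.Context, model string) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithTimeout(parent, 20*time.Second) // illustrative timeout
	md := metadata.Pairs("seldon-model", model)
	return metadata.NewOutgoingContext(ctx, md), cancel
}
```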
