diff --git a/CHANGELOG.md b/CHANGELOG.md index 932a5b3545cb9fb393ec933c99281ed863dece08..4367784164314556482fc0666b031d5b121f2edb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,12 +1,19 @@ -## 20.09.2 +## 20.09.2 (in progress) FEATURES * implemented possibility to send data without writing to database (no need of consecutive indexes, etc. but will not be able to consume such data) +* allow returning incomplete datasets (without an error if a minimum dataset size is set, otherwise with a "partial data" error) IMPROVEMENTS +* Consumer API - change behavior of GetLast/get_last - do not move the current pointer after a call for the last image * Producer API - return original data in callback payload. * Producer API - allow to set queue limits (number of pending requests and/or max memory), reject new requests if reached the limits +BREAKING CHANGES +* Consumer API - get_next_dataset, get_last_dataset, get_dataset_by_id return a dictionary with 'id','expected_size','content' fields, not a tuple (id,content) as before +* Consumer API - remove group_id argument from get_last/get_by_id/get_last_dataset/get_dataset_by_id functions +* Producer API - changed meaning of subsets (subset_id replaced with id_in_subset, which now means the id of the image within a subset (e.g. module number for a multi-module detector)), file_id is now a global id of multi-set data (e.g. multi-image id) + ## 20.09.1 FEATURES diff --git a/broker/src/asapo_broker/database/database.go b/broker/src/asapo_broker/database/database.go index 61ec11360a53ebf7a9e47c11db0d655d4a9d21e7..2ec1142d4dde6c50ea2557dff8ea0af6401d939c 100644 --- a/broker/src/asapo_broker/database/database.go +++ b/broker/src/asapo_broker/database/database.go @@ -1,7 +1,19 @@ package database +import "asapo_common/utils" + +type Request struct { + DbName string + DbCollectionName string + GroupId string + Op string + DatasetOp bool + MinDatasetSize int + ExtraParam string +} + type Agent interface { - ProcessRequest(db_name string, data_collection_name string, group_id string, op string, extra string) ([]byte, error) + ProcessRequest(request Request) ([]byte, error) Ping() error Connect(string) error Close() @@ -21,3 +33,13 @@ type DBError struct { func (err *DBError) Error() string { return err.Message } + +func GetStatusCodeFromError(err error) int { + err_db, ok := err.(*DBError) + if ok { + return err_db.Code + } else { + return utils.StatusServiceUnavailable + } +} + diff --git a/broker/src/asapo_broker/database/mock_database.go b/broker/src/asapo_broker/database/mock_database.go index 3797f8afb6d9fa006fb3b30ac488d66a55c59c11..574a8aa5161a133454dccdaf6d85cdda9e4f4b7e 100644 --- a/broker/src/asapo_broker/database/mock_database.go +++ b/broker/src/asapo_broker/database/mock_database.go @@ -29,7 +29,7 @@ func (db *MockedDatabase) SetSettings(settings DBSettings) { } -func (db *MockedDatabase) ProcessRequest(db_name string, data_collection_name string, group_id string, op string, extra_param string) (answer []byte, err error) { - args := db.Called(db_name, data_collection_name, group_id, op, extra_param) +func (db *MockedDatabase) ProcessRequest(request Request) (answer []byte, err error) { + args := db.Called(request) return args.Get(0).([]byte), args.Error(1) } diff --git a/broker/src/asapo_broker/database/mongodb.go b/broker/src/asapo_broker/database/mongodb.go index 576655bb496790466c116b2d6f688c27f50459a9..48fe7150aa3dbcf5faaedf055c5b6a785d5ee503 100644 --- a/broker/src/asapo_broker/database/mongodb.go +++ b/broker/src/asapo_broker/database/mongodb.go @@ -8,7 +8,8 @@ 
import ( "context" "encoding/json" "errors" + "fmt" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "math" @@ -159,14 +161,19 @@ func (db *Mongodb) insertMeta(dbname string, s interface{}) error { return err } -func (db *Mongodb) getMaxIndex(dbname string, collection_name string, dataset bool) (max_id int, err error) { - c := db.client.Database(dbname).Collection(data_collection_name_prefix + collection_name) +func (db *Mongodb) getMaxIndex(request Request, returnIncompete bool) (max_id int, err error) { + c := db.client.Database(request.DbName).Collection(data_collection_name_prefix + request.DbCollectionName) var q bson.M - if dataset { - q = bson.M{"$expr": bson.M{"$eq": []interface{}{"$size", bson.M{"$size": "$images"}}}} + if request.DatasetOp && !returnIncompete { + if request.MinDatasetSize>0 { + q = bson.M{"size": bson.M{"$gte": request.MinDatasetSize}} + } else { + q = bson.M{"$expr": bson.M{"$eq": []interface{}{"$size", bson.M{"$size": "$images"}}}} + } } else { q = nil } + opts := options.FindOne().SetSort(bson.M{"_id": -1}).SetReturnKey(true) var result ID err = c.FindOne(context.TODO(), q, opts).Decode(&result) @@ -190,20 +197,20 @@ func duplicateError(err error) bool { return command_error.Name == "DuplicateKey" } -func (db *Mongodb) setCounter(dbname string, collection_name string, group_id string, ind int) (err error) { +func (db *Mongodb) setCounter(request Request, ind int) (err error) { update := bson.M{"$set": bson.M{pointer_field_name: ind}} opts := options.Update().SetUpsert(true) - c := db.client.Database(dbname).Collection(pointer_collection_name) - q := bson.M{"_id": group_id + "_" + collection_name} + c := db.client.Database(request.DbName).Collection(pointer_collection_name) + q := bson.M{"_id": request.GroupId + "_" + request.DbCollectionName} _, err = c.UpdateOne(context.TODO(), q, update, opts) return } -func (db *Mongodb) incrementField(dbname string, collection_name string, group_id string, max_ind int, res interface{}) (err error) { +func (db *Mongodb) incrementField(request Request, max_ind int, res interface{}) (err error) { update := bson.M{"$inc": bson.M{pointer_field_name: 1}} opts := options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After) - q := bson.M{"_id": group_id + "_" + collection_name, pointer_field_name: bson.M{"$lt": max_ind}} - c := db.client.Database(dbname).Collection(pointer_collection_name) + q := bson.M{"_id": request.GroupId + "_" + request.DbCollectionName, pointer_field_name: bson.M{"$lt": max_ind}} + c := db.client.Database(request.DbName).Collection(pointer_collection_name) err = c.FindOneAndUpdate(context.TODO(), q, update, opts).Decode(res) if err != nil { @@ -234,26 +241,49 @@ func encodeAnswer(id, id_max int, next_substream string) string { return string(answer) } -func (db *Mongodb) getRecordByIDRow(dbname string, collection_name string, id, id_max int, dataset bool) ([]byte, error) { +func (db *Mongodb) getRecordByIDRow(request Request, id, id_max int) ([]byte, error) { var res map[string]interface{} - var q bson.M - if dataset { - q = bson.M{"$and": []bson.M{bson.M{"_id": id}, bson.M{"$expr": bson.M{"$eq": []interface{}{"$size", bson.M{"$size": "$images"}}}}}} - } else { - q = bson.M{"_id": id} - } + q := bson.M{"_id": id} - c := db.client.Database(dbname).Collection(data_collection_name_prefix + collection_name) + c := db.client.Database(request.DbName).Collection(data_collection_name_prefix 
+ c := db.client.Database(request.DbName).Collection(data_collection_name_prefix + request.DbCollectionName) err := c.FindOne(context.TODO(), q, options.FindOne()).Decode(&res) if err != nil { answer := encodeAnswer(id, id_max, "") - log_str := "error getting record id " + strconv.Itoa(id) + " for " + dbname + " : " + err.Error() + log_str := "error getting record id " + strconv.Itoa(id) + " for " + request.DbName + " : " + err.Error() logger.Debug(log_str) return nil, &DBError{utils.StatusNoData, answer} } - log_str := "got record id " + strconv.Itoa(id) + " for " + dbname - logger.Debug(log_str) - return utils.MapToJson(&res) + + partialData := false + if request.DatasetOp { + imgs, ok1 := res["images"].(primitive.A) + expectedSize, ok2 := utils.InterfaceToInt64(res["size"]) + if !ok1 || !ok2 { + return nil, &DBError{utils.StatusTransactionInterrupted, "getRecordByIDRow: cannot parse database response"} + } + nImages := len(imgs) + if (request.MinDatasetSize == 0 && int64(nImages) != expectedSize) || (request.MinDatasetSize > 0 && nImages < request.MinDatasetSize) { + partialData = true + } + } + + log_str := "got record id " + strconv.Itoa(id) + " for " + request.DbName + logger.Debug(log_str) + + answer, err := utils.MapToJson(&res) + if err != nil { + return nil, err + } + if partialData { + return nil, &DBError{utils.StatusPartialData, string(answer)} + } + return answer, nil } func (db *Mongodb) getEarliestRecord(dbname string, collection_name string) (map[string]interface{}, error) { @@ -272,22 +302,22 @@ func (db *Mongodb) getEarliestRecord(dbname string, collection_name string) (map return res,nil } -func (db *Mongodb) getRecordByID(dbname string, collection_name string, group_id string, id_str string, dataset bool) ([]byte, error) { - id, err := strconv.Atoi(id_str) +func (db *Mongodb) getRecordByID(request Request) ([]byte, error) { + id, err := strconv.Atoi(request.ExtraParam) if err != nil { return nil, &DBError{utils.StatusWrongInput, err.Error()} } - max_ind, err := db.getMaxIndex(dbname, collection_name, dataset) + max_ind, err := db.getMaxIndex(request, true) if err != nil { return nil, err } - return db.getRecordByIDRow(dbname, collection_name, id, max_ind, dataset) + return db.getRecordByIDRow(request, id, max_ind) } -func (db *Mongodb) negAckRecord(dbname string, group_id string, input_str string) ([]byte, error) { +func (db *Mongodb) negAckRecord(request Request) ([]byte, error) { input := struct { Id int Params struct { @@ -295,27 +325,27 @@ } }{} - err := json.Unmarshal([]byte(input_str), &input) + err := json.Unmarshal([]byte(request.ExtraParam), &input) if err != nil { return nil, &DBError{utils.StatusWrongInput, err.Error()} } - err = db.InsertRecordToInprocess(dbname,inprocess_collection_name_prefix+group_id,input.Id,input.Params.DelaySec, 1) + err = db.InsertRecordToInprocess(request.DbName, inprocess_collection_name_prefix+request.GroupId, input.Id, input.Params.DelaySec, 1) return []byte(""), err } -func (db *Mongodb) ackRecord(dbname string, collection_name string, group_id string, id_str string) ([]byte, error) { +func (db *Mongodb) ackRecord(request Request) ([]byte, error) { var record ID - err := json.Unmarshal([]byte(id_str),&record) + err := json.Unmarshal([]byte(request.ExtraParam), &record) if err != nil { return nil, &DBError{utils.StatusWrongInput, err.Error()} } - c := db.client.Database(dbname).Collection(acks_collection_name_prefix + collection_name + "_" + group_id)
+ c := db.client.Database(request.DbName).Collection(acks_collection_name_prefix + request.DbCollectionName + "_" + request.GroupId) _, err = c.InsertOne(context.Background(), &record) if err == nil { - c = db.client.Database(dbname).Collection(inprocess_collection_name_prefix + group_id) + c = db.client.Database(request.DbName).Collection(inprocess_collection_name_prefix + request.GroupId) _, err_del := c.DeleteOne(context.Background(), bson.M{"_id": record.ID}) if err_del != nil { return nil, &DBError{utils.StatusWrongInput, err_del.Error()} @@ -325,20 +355,20 @@ return []byte(""), err } -func (db *Mongodb) checkDatabaseOperationPrerequisites(db_name string, collection_name string, group_id string) error { +func (db *Mongodb) checkDatabaseOperationPrerequisites(request Request) error { if db.client == nil { return &DBError{utils.StatusServiceUnavailable, no_session_msg} } - if len(db_name) == 0 || len(collection_name) == 0 { + if len(request.DbName) == 0 || len(request.DbCollectionName) == 0 { return &DBError{utils.StatusWrongInput, "beamtime_id and substream must be set"} } return nil } -func (db *Mongodb) getCurrentPointer(db_name string, collection_name string, group_id string, dataset bool) (LocationPointer, int, error) { - max_ind, err := db.getMaxIndex(db_name, collection_name, dataset) +func (db *Mongodb) getCurrentPointer(request Request) (LocationPointer, int, error) { + max_ind, err := db.getMaxIndex(request, true) if err != nil { return LocationPointer{}, 0, err } @@ -348,7 +378,7 @@ } var curPointer LocationPointer - err = db.incrementField(db_name, collection_name, group_id, max_ind, &curPointer) + err = db.incrementField(request, max_ind, &curPointer) if err != nil { return LocationPointer{}, 0, err } @@ -408,11 +438,11 @@ } -func (db *Mongodb) getNextAndMaxIndexesFromInprocessed(db_name string, collection_name string, group_id string, dataset bool, extra_param string, ignoreTimeout bool) (int, int, error) { +func (db *Mongodb) getNextAndMaxIndexesFromInprocessed(request Request, ignoreTimeout bool) (int, int, error) { var record_ind, max_ind, delaySec, nResendAttempts int var err error - if len(extra_param) != 0 { - delaySec, nResendAttempts, err = extractsTwoIntsFromString(extra_param) + if len(request.ExtraParam) != 0 { + delaySec, nResendAttempts, err = extractsTwoIntsFromString(request.ExtraParam) if err != nil { return 0, 0, err } @@ -421,15 +451,15 @@ } tNow := time.Now().Unix() if (atomic.LoadInt64(&db.lastReadFromInprocess) <= tNow-int64(db.settings.ReadFromInprocessPeriod)) || ignoreTimeout { - record_ind, err = db.getUnProcessedId(db_name, inprocess_collection_name_prefix+group_id, delaySec,nResendAttempts) + record_ind, err = db.getUnProcessedId(request.DbName, inprocess_collection_name_prefix+request.GroupId, delaySec, nResendAttempts) if err != nil { - log_str := "error getting unprocessed id " + db_name + ", groupid: " + group_id + ":" + err.Error() + log_str := "error getting unprocessed id " + request.DbName + ", groupid: " + request.GroupId + ":" + err.Error() logger.Debug(log_str) return 0, 0, err } } if record_ind != 0 { - max_ind, err = 
db.getMaxIndex(db_name, collection_name, dataset) + max_ind, err = db.getMaxIndex(request, true) if err != nil { return 0, 0, err } @@ -441,29 +471,29 @@ func (db *Mongodb) getNextAndMaxIndexesFromInprocessed(db_name string, collectio } -func (db *Mongodb) getNextAndMaxIndexesFromCurPointer(db_name string, collection_name string, group_id string, dataset bool, extra_param string) (int, int, error) { - curPointer, max_ind, err := db.getCurrentPointer(db_name, collection_name, group_id, dataset) +func (db *Mongodb) getNextAndMaxIndexesFromCurPointer(request Request) (int, int, error) { + curPointer, max_ind, err := db.getCurrentPointer(request) if err != nil { - log_str := "error getting next pointer for " + db_name + ", groupid: " + group_id + ":" + err.Error() + log_str := "error getting next pointer for " + request.DbName + ", groupid: " + request.GroupId + ":" + err.Error() logger.Debug(log_str) return 0, 0, err } - log_str := "got next pointer " + strconv.Itoa(curPointer.Value) + " for " + db_name + ", groupid: " + group_id + log_str := "got next pointer " + strconv.Itoa(curPointer.Value) + " for " + request.DbName + ", groupid: " + request.GroupId logger.Debug(log_str) return curPointer.Value, max_ind, nil } -func (db *Mongodb) getNextAndMaxIndexes(db_name string, collection_name string, group_id string, dataset bool, extra_param string) (int, int, error) { - nextInd, maxInd, err := db.getNextAndMaxIndexesFromInprocessed(db_name, collection_name, group_id, dataset, extra_param, false) +func (db *Mongodb) getNextAndMaxIndexes(request Request) (int, int, error) { + nextInd, maxInd, err := db.getNextAndMaxIndexesFromInprocessed(request, false) if err != nil { return 0, 0, err } if nextInd == 0 { - nextInd, maxInd, err = db.getNextAndMaxIndexesFromCurPointer(db_name, collection_name, group_id, dataset, extra_param) + nextInd, maxInd, err = db.getNextAndMaxIndexesFromCurPointer(request) if err_db, ok := err.(*DBError); ok && err_db.Code == utils.StatusNoData { var err_inproc error - nextInd, maxInd, err_inproc = db.getNextAndMaxIndexesFromInprocessed(db_name, collection_name, group_id, dataset, extra_param, true) + nextInd, maxInd, err_inproc = db.getNextAndMaxIndexesFromInprocessed(request, true) if err_inproc != nil { return 0, 0, err_inproc } @@ -475,8 +505,7 @@ func (db *Mongodb) getNextAndMaxIndexes(db_name string, collection_name string, return nextInd, maxInd, nil } -func (db *Mongodb) processLastRecord(data []byte, err error, db_name string, collection_name string, - group_id string, dataset bool, extra_param string) ([]byte, error) { +func (db *Mongodb) processLastRecord(request Request, data []byte, err error) ([]byte, error) { var r ServiceRecord err = json.Unmarshal(data, &r) if err != nil || r.Name != finish_substream_keyword { @@ -489,35 +518,36 @@ func (db *Mongodb) processLastRecord(data []byte, err error, db_name string, col } answer := encodeAnswer(r.ID, r.ID, next_substream) - log_str := "reached end of substream " + collection_name + " , next_substream: " + next_substream + log_str := "reached end of substream " + request.DbCollectionName + " , next_substream: " + next_substream logger.Debug(log_str) var err_inproc error - nextInd, maxInd, err_inproc := db.getNextAndMaxIndexesFromInprocessed(db_name, collection_name, group_id, dataset, extra_param, true) + nextInd, maxInd, err_inproc := db.getNextAndMaxIndexesFromInprocessed(request, true) if err_inproc != nil { return nil, err_inproc } if nextInd != 0 { - return db.getRecordByIDRow(db_name, collection_name, nextInd, 
maxInd, dataset) + return db.getRecordByIDRow(request, nextInd, maxInd) } return nil, &DBError{utils.StatusNoData, answer} } -func (db *Mongodb) getNextRecord(db_name string, collection_name string, group_id string, dataset bool, extra_param string) ([]byte, error) { - nextInd, maxInd, err := db.getNextAndMaxIndexes(db_name, collection_name, group_id, dataset, extra_param) +func (db *Mongodb) getNextRecord(request Request) ([]byte, error) { + + nextInd, maxInd, err := db.getNextAndMaxIndexes(request) if err != nil { return nil, err } - data, err := db.getRecordByIDRow(db_name, collection_name, nextInd, maxInd, dataset) - if nextInd == maxInd { - data, err = db.processLastRecord(data, err,db_name,collection_name,group_id,dataset,extra_param) + data, err := db.getRecordByIDRow(request, nextInd, maxInd) + if nextInd == maxInd && GetStatusCodeFromError(err) != utils.StatusPartialData { + data, err = db.processLastRecord(request, data, err) } if err == nil { - err_update := db.InsertToInprocessIfNeeded(db_name, inprocess_collection_name_prefix+group_id, nextInd, extra_param) + err_update := db.InsertToInprocessIfNeeded(request.DbName, inprocess_collection_name_prefix+request.GroupId, nextInd, request.ExtraParam) if err_update != nil { return nil, err_update } @@ -525,20 +555,16 @@ return data, err } -func (db *Mongodb) getLastRecord(db_name string, collection_name string, group_id string, dataset bool) ([]byte, error) { - max_ind, err := db.getMaxIndex(db_name, collection_name, dataset) +func (db *Mongodb) getLastRecord(request Request) ([]byte, error) { + max_ind, err := db.getMaxIndex(request, false) if err != nil { return nil, err } - res, err := db.getRecordByIDRow(db_name, collection_name, max_ind, max_ind, dataset) - - db.setCounter(db_name, collection_name, group_id, max_ind) - - return res, err + return db.getRecordByIDRow(request, max_ind, max_ind) } -func (db *Mongodb) getSize(db_name string, collection_name string) ([]byte, error) { - c := db.client.Database(db_name).Collection(data_collection_name_prefix + collection_name) +func (db *Mongodb) getSize(request Request) ([]byte, error) { + c := db.client.Database(request.DbName).Collection(data_collection_name_prefix + request.DbCollectionName) var rec SizeRecord var err error @@ -550,18 +576,18 @@ return json.Marshal(&rec) } -func (db *Mongodb) resetCounter(db_name string, collection_name string, group_id string, id_str string) ([]byte, error) { - id, err := strconv.Atoi(id_str) +func (db *Mongodb) resetCounter(request Request) ([]byte, error) { + id, err := strconv.Atoi(request.ExtraParam) if err != nil { return nil, err } - err = db.setCounter(db_name, collection_name, group_id, id) + err = db.setCounter(request, id) if err!= nil { return []byte(""), err } - c := db.client.Database(db_name).Collection(inprocess_collection_name_prefix + group_id) + c := db.client.Database(request.DbName).Collection(inprocess_collection_name_prefix + request.GroupId) _, err_del := c.DeleteMany(context.Background(), bson.M{"_id": bson.M{"$gte": id}}) if err_del != nil { return nil, &DBError{utils.StatusWrongInput, err_del.Error()} @@ -570,22 +596,22 @@ return []byte(""), nil } -func (db *Mongodb) getMeta(dbname string, id_str string) ([]byte, error) { - id, err := strconv.Atoi(id_str) +func (db *Mongodb) getMeta(request 
Request) ([]byte, error) { + id, err := strconv.Atoi(request.ExtraParam) if err != nil { return nil, err } var res map[string]interface{} q := bson.M{"_id": id} - c := db.client.Database(dbname).Collection(meta_collection_name) + c := db.client.Database(request.DbName).Collection(meta_collection_name) err = c.FindOne(context.TODO(), q, options.FindOne()).Decode(&res) if err != nil { - log_str := "error getting meta with id " + strconv.Itoa(id) + " for " + dbname + " : " + err.Error() + log_str := "error getting meta with id " + strconv.Itoa(id) + " for " + request.DbName + " : " + err.Error() logger.Debug(log_str) return nil, &DBError{utils.StatusNoData, err.Error()} } - log_str := "got record id " + strconv.Itoa(id) + " for " + dbname + log_str := "got record id " + strconv.Itoa(id) + " for " + request.DbName logger.Debug(log_str) return utils.MapToJson(&res) } @@ -596,16 +622,16 @@ func (db *Mongodb) processQueryError(query, dbname string, err error) ([]byte, e return nil, &DBError{utils.StatusNoData, err.Error()} } -func (db *Mongodb) queryImages(dbname string, collection_name string, query string) ([]byte, error) { +func (db *Mongodb) queryImages(request Request) ([]byte, error) { var res []map[string]interface{} - q, sort, err := db.BSONFromSQL(dbname, query) + q, sort, err := db.BSONFromSQL(request.DbName, request.ExtraParam) if err != nil { - log_str := "error parsing query: " + query + " for " + dbname + " : " + err.Error() + log_str := "error parsing query: " + request.ExtraParam + " for " + request.DbName + " : " + err.Error() logger.Debug(log_str) return nil, &DBError{utils.StatusWrongInput, err.Error()} } - c := db.client.Database(dbname).Collection(data_collection_name_prefix + collection_name) + c := db.client.Database(request.DbName).Collection(data_collection_name_prefix + request.DbCollectionName) opts := options.Find() if len(sort) > 0 { @@ -615,14 +641,14 @@ func (db *Mongodb) queryImages(dbname string, collection_name string, query stri cursor, err := c.Find(context.TODO(), q, opts) if err != nil { - return db.processQueryError(query, dbname, err) + return db.processQueryError(request.ExtraParam, request.DbName, err) } err = cursor.All(context.TODO(), &res) if err != nil { - return db.processQueryError(query, dbname, err) + return db.processQueryError(request.ExtraParam, request.DbName, err) } - log_str := "processed query " + query + " for " + dbname + " ,found" + strconv.Itoa(len(res)) + " records" + log_str := "processed query " + request.ExtraParam + " for " + request.DbName + " ,found" + strconv.Itoa(len(res)) + " records" logger.Debug(log_str) if res != nil { return utils.MapToJson(&res) @@ -658,8 +684,8 @@ func extractsTwoIntsFromString(from_to string) (int, int, error) { } -func (db *Mongodb) nacks(db_name string, collection_name string, group_id string, from_to string) ([]byte, error) { - from, to, err := extractsTwoIntsFromString(from_to) +func (db *Mongodb) nacks(request Request) ([]byte, error) { + from, to, err := extractsTwoIntsFromString(request.ExtraParam) if err != nil { return nil, err } @@ -669,7 +695,7 @@ func (db *Mongodb) nacks(db_name string, collection_name string, group_id string } if to == 0 { - to, err = db.getMaxIndex(db_name, collection_name, false) + to, err = db.getMaxIndex(request, true) if err != nil { return nil, err } @@ -680,7 +706,7 @@ func (db *Mongodb) nacks(db_name string, collection_name string, group_id string return utils.MapToJson(&res) } - res.Unacknowledged, err = db.getNacks(db_name, collection_name, group_id, from, to) + 
res.Unacknowledged, err = db.getNacks(request, from, to) if err != nil { return nil, err } @@ -688,8 +714,8 @@ func (db *Mongodb) nacks(db_name string, collection_name string, group_id string return utils.MapToJson(&res) } -func (db *Mongodb) lastAck(db_name string, collection_name string, group_id string) ([]byte, error) { - c := db.client.Database(db_name).Collection(acks_collection_name_prefix + collection_name + "_" + group_id) +func (db *Mongodb) lastAck(request Request) ([]byte, error) { + c := db.client.Database(request.DbName).Collection(acks_collection_name_prefix + request.DbCollectionName + "_" + request.GroupId) opts := options.FindOne().SetSort(bson.M{"_id": -1}).SetReturnKey(true) result := LastAck{0} var q bson.M = nil @@ -704,9 +730,9 @@ func (db *Mongodb) lastAck(db_name string, collection_name string, group_id stri return utils.MapToJson(&result) } -func (db *Mongodb) getNacks(db_name string, collection_name string, group_id string, min_index, max_index int) ([]int, error) { +func (db *Mongodb) getNacks(request Request, min_index, max_index int) ([]int, error) { - c := db.client.Database(db_name).Collection(acks_collection_name_prefix + collection_name + "_" + group_id) + c := db.client.Database(request.DbName).Collection(acks_collection_name_prefix + request.DbCollectionName + "_" + request.GroupId) if min_index > max_index { return []int{}, errors.New("from index is greater than to index") @@ -755,52 +781,46 @@ func (db *Mongodb) getNacks(db_name string, collection_name string, group_id str return resp[0].Numbers, nil } -func (db *Mongodb) getSubstreams(db_name string, from string) ([]byte, error) { - rec, err := substreams.getSubstreams(db,db_name,from) +func (db *Mongodb) getSubstreams(request Request) ([]byte, error) { + rec, err := substreams.getSubstreams(db,request.DbName,request.ExtraParam) if err != nil { - return db.processQueryError("get substreams", db_name, err) + return db.processQueryError("get substreams", request.DbName, err) } return json.Marshal(&rec) } -func (db *Mongodb) ProcessRequest(db_name string, collection_name string, group_id string, op string, extra_param string) (answer []byte, err error) { - dataset := false - if strings.HasSuffix(op, "_dataset") { - dataset = true - op = op[:len(op)-8] - } - - if err := db.checkDatabaseOperationPrerequisites(db_name, collection_name, group_id); err != nil { +func (db *Mongodb) ProcessRequest(request Request) (answer []byte, err error) { + if err := db.checkDatabaseOperationPrerequisites(request); err != nil { return nil, err } - switch op { + switch request.Op { case "next": - return db.getNextRecord(db_name, collection_name, group_id, dataset, extra_param) + return db.getNextRecord(request) case "id": - return db.getRecordByID(db_name, collection_name, group_id, extra_param, dataset) + return db.getRecordByID(request) case "last": - return db.getLastRecord(db_name, collection_name, group_id, dataset) + return db.getLastRecord(request) case "resetcounter": - return db.resetCounter(db_name, collection_name, group_id, extra_param) + return db.resetCounter(request) case "size": - return db.getSize(db_name, collection_name) + return db.getSize(request) case "meta": - return db.getMeta(db_name, extra_param) + return db.getMeta(request) case "queryimages": - return db.queryImages(db_name, collection_name, extra_param) + return db.queryImages(request) case "substreams": - return db.getSubstreams(db_name,extra_param) + return db.getSubstreams(request) case "ackimage": - return db.ackRecord(db_name, 
collection_name, group_id, extra_param) + return db.ackRecord(request) case "negackimage": - return db.negAckRecord(db_name, group_id, extra_param) + return db.negAckRecord(request) case "nacks": - return db.nacks(db_name, collection_name, group_id, extra_param) + return db.nacks(request) case "lastack": - return db.lastAck(db_name, collection_name, group_id) + return db.lastAck(request) } - return nil, errors.New("Wrong db operation: " + op) + return nil, errors.New("Wrong db operation: " + request.Op) } diff --git a/broker/src/asapo_broker/database/mongodb_substreams.go b/broker/src/asapo_broker/database/mongodb_substreams.go index a96b89b8b4bc83959995457579c679ffda2c8b79..999e6fa17b1c2b07b24db67e2d0166d7291d336d 100644 --- a/broker/src/asapo_broker/database/mongodb_substreams.go +++ b/broker/src/asapo_broker/database/mongodb_substreams.go @@ -3,6 +3,7 @@ package database import ( + "asapo_common/utils" "context" "errors" "go.mongodb.org/mongo-driver/bson" @@ -76,14 +77,10 @@ func updateTimestamps(db *Mongodb, db_name string, rec *SubstreamsRecord) { } res, err := db.getEarliestRecord(db_name, record.Name) if err == nil { - ts,ok:=res["timestamp"].(int64) - var tsint float64 - if !ok { // we need this (at least for tests) since by default values are float in mongo - tsint,ok = res["timestamp"].(float64) - ts = int64(tsint) - } + ts,ok:=utils.InterfaceToInt64(res["timestamp"]) if ok { - rec.Substreams[i].Timestamp = ts } + rec.Substreams[i].Timestamp = ts + } } } } diff --git a/broker/src/asapo_broker/database/mongodb_test.go b/broker/src/asapo_broker/database/mongodb_test.go index 8e2d97295a7f2b3e782fea6757b24b9098bd06c1..905b2c3613d40273379bd98663f424c7056725ea 100644 --- a/broker/src/asapo_broker/database/mongodb_test.go +++ b/broker/src/asapo_broker/database/mongodb_test.go @@ -13,15 +13,15 @@ import ( ) type TestRecord struct { - ID int `bson:"_id" json:"_id"` - Meta map[string]string `bson:"meta" json:"meta"` - Name string `bson:"name" json:"name"` - Timestamp int64 `bson:"timestamp" json:"timestamp"` + ID int64 `bson:"_id" json:"_id"` + Meta map[string]string `bson:"meta" json:"meta"` + Name string `bson:"name" json:"name"` + Timestamp int64 `bson:"timestamp" json:"timestamp"` } type TestDataset struct { - ID int `bson:"_id" json:"_id"` - Size int `bson:"size" json:"size"` + ID int64 `bson:"_id" json:"_id"` + Size int64 `bson:"size" json:"size"` Images []TestRecord `bson:"images" json:"images"` } @@ -37,10 +37,10 @@ const metaID_str = "0" var empty_next = map[string]string{"next_substream": ""} -var rec1 = TestRecord{1, empty_next, "aaa",0} -var rec_finished = TestRecord{2, map[string]string{"next_substream": "next1"}, finish_substream_keyword,0} -var rec2 = TestRecord{2, empty_next, "bbb",1} -var rec3 = TestRecord{3, empty_next, "ccc",2} +var rec1 = TestRecord{1, empty_next, "aaa", 0} +var rec_finished = TestRecord{2, map[string]string{"next_substream": "next1"}, finish_substream_keyword, 0} +var rec2 = TestRecord{2, empty_next, "bbb", 1} +var rec3 = TestRecord{3, empty_next, "ccc", 2} var rec1_expect, _ = json.Marshal(rec1) var rec2_expect, _ = json.Marshal(rec2) @@ -75,31 +75,31 @@ func TestMongoDBConnectOK(t *testing.T) { } func TestMongoDBGetNextErrorWhenNotConnected(t *testing.T) { - _, err := db.ProcessRequest(dbname, collection, groupId, "next", "") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code) } func 
TestMongoDBGetMetaErrorWhenNotConnected(t *testing.T) { - _, err := db.ProcessRequest(dbname, collection, "", "meta", "0") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "meta", ExtraParam: "0"}) assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code) } func TestMongoDBQueryImagesErrorWhenNotConnected(t *testing.T) { - _, err := db.ProcessRequest(dbname, collection, "", "queryimages", "0") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "queryimages", ExtraParam: "0"}) assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code) } func TestMongoDBGetNextErrorWhenWrongDatabasename(t *testing.T) { db.Connect(dbaddress) defer cleanup() - _, err := db.ProcessRequest("", collection, groupId, "next", "") + _, err := db.ProcessRequest(Request{DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Equal(t, utils.StatusWrongInput, err.(*DBError).Code) } func TestMongoDBGetNextErrorWhenNonExistingDatacollectionname(t *testing.T) { db.Connect(dbaddress) defer cleanup() - _, err := db.ProcessRequest(dbname, "bla", groupId, "next", "") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "bla", GroupId: groupId, Op: "next"}) assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.Error()) } @@ -107,26 +107,25 @@ func TestMongoDBGetNextErrorWhenNonExistingDatacollectionname(t *testing.T) { func TestMongoDBGetLastErrorWhenNonExistingDatacollectionname(t *testing.T) { db.Connect(dbaddress) defer cleanup() - _, err := db.ProcessRequest(dbname, "bla", groupId, "last", "") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "bla", GroupId: groupId, Op: "last"}) assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.Error()) } -func TestMongoDBGetByIdErrorWhenNonExistingDatacollectionname(t *testing.T) { +func TestMongoDBGetByIdErrorWhenNoData(t *testing.T) { db.Connect(dbaddress) defer cleanup() - _, err := db.ProcessRequest(dbname, collection, groupId, "id", "2") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", ExtraParam: "2"}) assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":0,\"next_substream\":\"\"}", err.Error()) } - func TestMongoDBGetNextErrorWhenRecordNotThereYet(t *testing.T) { db.Connect(dbaddress) defer cleanup() db.insertRecord(dbname, collection, &rec2) - _, err := db.ProcessRequest(dbname, collection, groupId, "next", "") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_substream\":\"\"}", err.Error()) } @@ -135,20 +134,19 @@ func TestMongoDBGetNextOK(t *testing.T) { db.Connect(dbaddress) defer cleanup() db.insertRecord(dbname, collection, &rec1) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Nil(t, err) assert.Equal(t, string(rec1_expect), string(res)) } - func TestMongoDBGetNextErrorOnFinishedStream(t *testing.T) { db.Connect(dbaddress) defer cleanup() 
db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec_finished) - db.ProcessRequest(dbname, collection, groupId, "next", "") - _, err := db.ProcessRequest(dbname, collection, groupId, "next", "") + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_substream\":\"next1\"}", err.(*DBError).Message) @@ -158,8 +156,8 @@ func TestMongoDBGetNextErrorOnNoMoreData(t *testing.T) { db.Connect(dbaddress) defer cleanup() db.insertRecord(dbname, collection, &rec1) - db.ProcessRequest(dbname, collection, groupId, "next", "") - _, err := db.ProcessRequest(dbname, collection, groupId, "next", "") + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err.(*DBError).Message) @@ -170,8 +168,8 @@ func TestMongoDBGetNextCorrectOrder(t *testing.T) { defer cleanup() db.insertRecord(dbname, collection, &rec2) db.insertRecord(dbname, collection, &rec1) - res1, _ := db.ProcessRequest(dbname, collection, groupId, "next", "") - res2, _ := db.ProcessRequest(dbname, collection, groupId, "next", "") + res1, _ := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) + res2, _ := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Equal(t, string(rec1_expect), string(res1)) assert.Equal(t, string(rec2_expect), string(res2)) } @@ -189,10 +187,10 @@ func getNOnes(array []int) int { func insertRecords(n int) { records := make([]TestRecord, n) for ind, record := range records { - record.ID = ind + 1 + record.ID = int64(ind) + 1 record.Name = string(ind) - if err:= db.insertRecord(dbname, collection, &record);err!=nil { - fmt.Println("error at insert ",ind) + if err := db.insertRecord(dbname, collection, &record); err != nil { + fmt.Println("error at insert ", ind) } } } @@ -201,20 +199,20 @@ func getRecords(n int, resend bool) []int { results := make([]int, n) var wg sync.WaitGroup wg.Add(n) - extra_param:="" + extra_param := "" if resend { - extra_param="0_1" + extra_param = "0_1" } for i := 0; i < n; i++ { go func() { defer wg.Done() - res_bin, err:= db.ProcessRequest(dbname, collection, groupId, "next", extra_param) - if err!=nil { - fmt.Println("error at read ",i) + res_bin, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: extra_param}) + if err != nil { + fmt.Println("error at read ", i) } var res TestRecord json.Unmarshal(res_bin, &res) - if res.ID>0 { + if res.ID > 0 { results[res.ID-1] = 1 } }() @@ -230,7 +228,7 @@ func TestMongoDBGetNextInParallel(t *testing.T) { n := 100 insertRecords(n) - results := getRecords(n,false) + results := getRecords(n, false) assert.Equal(t, n, getNOnes(results)) } @@ -242,26 +240,24 @@ func TestMongoDBGetNextInParallelWithResend(t *testing.T) { n := 100 insertRecords(n) - results := getRecords(n,true) - results2 := getRecords(n,true) + results := getRecords(n, true) + results2 
:= getRecords(n, true) - assert.Equal(t, n, getNOnes(results),"first") - assert.Equal(t, n, getNOnes(results2),"second") + assert.Equal(t, n, getNOnes(results), "first") + assert.Equal(t, n, getNOnes(results2), "second") } - - func TestMongoDBGetLastAfterErasingDatabase(t *testing.T) { db.Connect(dbaddress) defer cleanup() insertRecords(10) - db.ProcessRequest(dbname, collection, groupId, "next", "") + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) db.dropDatabase(dbname) db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec2) - res, err := db.ProcessRequest(dbname, collection, groupId, "last", "0") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "last", ExtraParam: "0"}) assert.Nil(t, err) assert.Equal(t, string(rec2_expect), string(res)) } @@ -270,12 +266,12 @@ func TestMongoDBGetNextAfterErasingDatabase(t *testing.T) { db.Connect(dbaddress) defer cleanup() insertRecords(200) - db.ProcessRequest(dbname, collection, groupId, "next", "") + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) db.dropDatabase(dbname) n := 100 insertRecords(n) - results := getRecords(n,false) + results := getRecords(n, false) assert.Equal(t, n, getNOnes(results)) } @@ -283,20 +279,20 @@ func TestMongoDBGetNextEmptyAfterErasingDatabase(t *testing.T) { db.Connect(dbaddress) defer cleanup() insertRecords(10) - db.ProcessRequest(dbname, collection, groupId, "next", "") + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) db.dropDatabase(dbname) - _, err := db.ProcessRequest(dbname, collection, groupId, "next", "") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.Error()) } - func TestMongoDBgetRecordByID(t *testing.T) { db.Connect(dbaddress) defer cleanup() db.insertRecord(dbname, collection, &rec1) - res, err := db.ProcessRequest(dbname, collection, groupId, "id", "1") + + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", ExtraParam: "1"}) assert.Nil(t, err) assert.Equal(t, string(rec1_expect), string(res)) } @@ -305,7 +301,7 @@ func TestMongoDBgetRecordByIDFails(t *testing.T) { db.Connect(dbaddress) defer cleanup() db.insertRecord(dbname, collection, &rec1) - _, err := db.ProcessRequest(dbname, collection, groupId, "id", "2") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", ExtraParam: "2"}) assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":1,\"next_substream\":\"\"}", err.Error()) } @@ -314,7 +310,7 @@ func TestMongoDBGetRecordNext(t *testing.T) { db.Connect(dbaddress) defer cleanup() db.insertRecord(dbname, collection, &rec1) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Nil(t, err) assert.Equal(t, string(rec1_expect), string(res)) } @@ -325,8 +321,8 @@ func TestMongoDBGetRecordNextMultipleCollections(t *testing.T) { db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection2, 
&rec_dataset1) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "") - res_string, err2 := db.ProcessRequest(dbname, collection2, groupId, "next_dataset", "") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) + res_string, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection2, GroupId: groupId, Op: "next", DatasetOp: true}) var res_ds TestDataset json.Unmarshal(res_string, &res_ds) @@ -342,7 +338,7 @@ func TestMongoDBGetRecordID(t *testing.T) { db.Connect(dbaddress) defer cleanup() db.insertRecord(dbname, collection, &rec1) - res, err := db.ProcessRequest(dbname, collection, groupId, "id", "1") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", ExtraParam: "1"}) assert.Nil(t, err) assert.Equal(t, string(rec1_expect), string(res)) } @@ -351,7 +347,7 @@ func TestMongoDBWrongOp(t *testing.T) { db.Connect(dbaddress) defer cleanup() db.insertRecord(dbname, collection, &rec1) - _, err := db.ProcessRequest(dbname, collection, groupId, "bla", "0") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "bla"}) assert.NotNil(t, err) } @@ -361,7 +357,7 @@ func TestMongoDBGetRecordLast(t *testing.T) { db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec2) - res, err := db.ProcessRequest(dbname, collection, groupId, "last", "0") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "last", ExtraParam: "0"}) assert.Nil(t, err) assert.Equal(t, string(rec2_expect), string(res)) } @@ -372,15 +368,15 @@ func TestMongoDBGetNextAfterGetLastCorrect(t *testing.T) { db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec2) - res, err := db.ProcessRequest(dbname, collection, groupId, "last", "0") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "last", ExtraParam: "0"}) assert.Nil(t, err) assert.Equal(t, string(rec2_expect), string(res)) db.insertRecord(dbname, collection, &rec3) - res, err = db.ProcessRequest(dbname, collection, groupId, "next", "") + res, err = db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Nil(t, err) - assert.Equal(t, string(rec3_expect), string(res)) + assert.Equal(t, string(rec1_expect), string(res)) } @@ -391,7 +387,7 @@ func TestMongoDBGetSize(t *testing.T) { db.insertRecord(dbname, collection, &rec2) db.insertRecord(dbname, collection, &rec3) - res, err := db.ProcessRequest(dbname, collection, "", "size", "0") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "size"}) assert.Nil(t, err) assert.Equal(t, string(recs1_expect), string(res)) } @@ -400,7 +396,7 @@ func TestMongoDBGetSizeNoRecords(t *testing.T) { db.Connect(dbaddress) defer cleanup() - res, err := db.ProcessRequest(dbname, collection, "", "size", "0") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "size"}) assert.Nil(t, err) assert.Equal(t, string(recs2_expect), string(res)) } @@ -418,7 +414,7 @@ func TestMongoPingNotConected(t *testing.T) { } func TestMongoDBgetRecordByIDNotConnected(t *testing.T) { - _, err := db.ProcessRequest(dbname, collection, "", "id", "2") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", ExtraParam: "1"}) 
assert.Equal(t, utils.StatusServiceUnavailable, err.(*DBError).Code) } @@ -428,15 +424,15 @@ func TestMongoDBResetCounter(t *testing.T) { db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec2) - res1, err1 := db.ProcessRequest(dbname, collection, groupId, "next", "") + res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Nil(t, err1) assert.Equal(t, string(rec1_expect), string(res1)) - _, err_reset := db.ProcessRequest(dbname, collection, groupId, "resetcounter", "1") + _, err_reset := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "resetcounter", ExtraParam: "1"}) assert.Nil(t, err_reset) - res2, err2 := db.ProcessRequest(dbname, collection, groupId, "next", "") + res2, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) assert.Nil(t, err2) assert.Equal(t, string(rec2_expect), string(res2)) @@ -450,7 +446,7 @@ func TestMongoDBGetMetaOK(t *testing.T) { rec_expect, _ := json.Marshal(recm) db.insertMeta(dbname, &recm) - res, err := db.ProcessRequest(dbname, collection, "", "meta", metaID_str) + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "meta", ExtraParam: metaID_str}) assert.Nil(t, err) assert.Equal(t, string(rec_expect), string(res)) @@ -460,7 +456,7 @@ func TestMongoDBGetMetaErr(t *testing.T) { db.Connect(dbaddress) defer cleanup() - _, err := db.ProcessRequest(dbname, collection, "", "meta", metaID_str) + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "meta", ExtraParam: metaID_str}) assert.NotNil(t, err) } @@ -486,7 +482,7 @@ var tests = []struct { res []TestRecordMeta ok bool }{ - {"_id > 0", []TestRecordMeta{recq1, recq2,recq3,recq4}, true}, + {"_id > 0", []TestRecordMeta{recq1, recq2, recq3, recq4}, true}, {"meta.counter = 10", []TestRecordMeta{recq1, recq3}, true}, {"meta.counter = 10 ORDER BY _id DESC", []TestRecordMeta{recq3, recq1}, true}, {"meta.counter > 10 ORDER BY meta.counter DESC", []TestRecordMeta{recq4, recq2}, true}, @@ -535,7 +531,7 @@ func TestMongoDBQueryImagesOK(t *testing.T) { // continue // } - res_string, err := db.ProcessRequest(dbname, collection, "", "queryimages", test.query) + res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "queryimages", ExtraParam: test.query}) var res []TestRecordMeta json.Unmarshal(res_string, &res) // fmt.Println(string(res_string)) @@ -554,7 +550,7 @@ func TestMongoDBQueryImagesOnEmptyDatabase(t *testing.T) { db.Connect(dbaddress) defer cleanup() for _, test := range tests { - res_string, err := db.ProcessRequest(dbname, collection, "", "queryimages", test.query) + res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, Op: "queryimages", ExtraParam: test.query}) var res []TestRecordMeta json.Unmarshal(res_string, &res) assert.Equal(t, 0, len(res)) @@ -568,6 +564,7 @@ func TestMongoDBQueryImagesOnEmptyDatabase(t *testing.T) { var rec_dataset1 = TestDataset{1, 3, []TestRecord{rec1, rec2, rec3}} var rec_dataset1_incomplete = TestDataset{1, 4, []TestRecord{rec1, rec2, rec3}} +var rec_dataset2_incomplete = TestDataset{2, 4, []TestRecord{rec1, rec2, rec3}} var rec_dataset2 = TestDataset{2, 4, []TestRecord{rec1, rec2, rec3}} var rec_dataset3 = TestDataset{3, 3, []TestRecord{rec3, rec2, rec2}} @@ -577,7 +574,7 @@ func TestMongoDBGetDataset(t *testing.T) { 
db.insertRecord(dbname, collection, &rec_dataset1) - res_string, err := db.ProcessRequest(dbname, collection, groupId, "next_dataset", "") + res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", DatasetOp: true}) assert.Nil(t, err) @@ -593,14 +590,49 @@ db.insertRecord(dbname, collection, &rec_dataset1_incomplete) - res_string, err := db.ProcessRequest(dbname, collection, groupId, "next_dataset", "") + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", DatasetOp: true}) - assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) - assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":0,\"id_max\":0,\"next_substream\":\"\"}", err.(*DBError).Message) + assert.Equal(t, utils.StatusPartialData, err.(*DBError).Code) + var res TestDataset + json.Unmarshal([]byte(err.(*DBError).Message), &res) + assert.Equal(t, rec_dataset1_incomplete, res) +} + +func TestMongoDBPartialDataOnNotCompletedNextDataset(t *testing.T) { + db.Connect(dbaddress) + defer cleanup() + + db.insertRecord(dbname, collection, &rec_dataset1_incomplete) + db.insertRecord(dbname, collection, &rec_dataset2_incomplete) + + _, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", DatasetOp: true}) + _, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", DatasetOp: true}) + + assert.Equal(t, utils.StatusPartialData, err1.(*DBError).Code) + assert.Equal(t, utils.StatusPartialData, err2.(*DBError).Code) + var res TestDataset + json.Unmarshal([]byte(err2.(*DBError).Message), &res) + assert.Equal(t, rec_dataset2_incomplete, res) +} + + +func TestMongoDBReturnsIncompleteDataset(t *testing.T) { + db.Connect(dbaddress) + defer cleanup() + + db.insertRecord(dbname, collection, &rec_dataset1_incomplete) + + res_string, err := db.ProcessRequest(Request{DbName: dbname, + DbCollectionName: collection, GroupId: groupId, Op: "next", DatasetOp: true, MinDatasetSize: 1}) - assert.Equal(t, "", string(res_string)) + assert.Nil(t, err) + var res TestDataset + json.Unmarshal(res_string, &res) + + assert.Equal(t, rec_dataset1_incomplete, res) } + func TestMongoDBGetRecordLastDataSetSkipsIncompleteSets(t *testing.T) { db.Connect(dbaddress) defer cleanup() @@ -608,7 +640,7 @@ db.insertRecord(dbname, collection, &rec_dataset1) db.insertRecord(dbname, collection, &rec_dataset2) - res_string, err := db.ProcessRequest(dbname, collection, groupId, "last_dataset", "0") + res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "last", DatasetOp: true, ExtraParam: "0"}) assert.Nil(t, err) @@ -618,6 +650,24 @@ assert.Equal(t, rec_dataset1, res) } +func TestMongoDBGetRecordLastDataSetReturnsIncompleteSets(t *testing.T) { + db.Connect(dbaddress) + defer cleanup() + + db.insertRecord(dbname, collection, &rec_dataset1) + db.insertRecord(dbname, collection, &rec_dataset2) + + res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "last", + DatasetOp: true, MinDatasetSize: 2, ExtraParam: "0"}) + + assert.Nil(t, err) + + var res TestDataset + json.Unmarshal(res_string, &res) + + assert.Equal(t, rec_dataset2, res) +} + func 
TestMongoDBGetRecordLastDataSetOK(t *testing.T) { db.Connect(dbaddress) defer cleanup() @@ -625,7 +675,7 @@ func TestMongoDBGetRecordLastDataSetOK(t *testing.T) { db.insertRecord(dbname, collection, &rec_dataset1) db.insertRecord(dbname, collection, &rec_dataset3) - res_string, err := db.ProcessRequest(dbname, collection, groupId, "last_dataset", "0") + res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "last", DatasetOp: true, ExtraParam: "0"}) assert.Nil(t, err) @@ -640,7 +690,7 @@ func TestMongoDBGetDatasetID(t *testing.T) { defer cleanup() db.insertRecord(dbname, collection, &rec_dataset1) - res_string, err := db.ProcessRequest(dbname, collection, groupId, "id_dataset", "1") + res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", DatasetOp:true, ExtraParam: "1"}) assert.Nil(t, err) @@ -651,34 +701,67 @@ func TestMongoDBGetDatasetID(t *testing.T) { } +func TestMongoDBErrorOnIncompleteDatasetID(t *testing.T) { + db.Connect(dbaddress) + defer cleanup() + db.insertRecord(dbname, collection, &rec_dataset1_incomplete) + + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", DatasetOp:true, ExtraParam: "1"}) + + assert.Equal(t, utils.StatusPartialData, err.(*DBError).Code) + + var res TestDataset + json.Unmarshal([]byte(err.(*DBError).Message), &res) + + assert.Equal(t, rec_dataset1_incomplete, res) + +} + +func TestMongoDBOkOnIncompleteDatasetID(t *testing.T) { + db.Connect(dbaddress) + defer cleanup() + db.insertRecord(dbname, collection, &rec_dataset1_incomplete) + + res_string, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "id", DatasetOp:true,MinDatasetSize: 3,ExtraParam: "1"}) + + assert.Nil(t, err) + + var res TestDataset + json.Unmarshal(res_string, &res) + + assert.Equal(t, rec_dataset1_incomplete, res) + +} + type Substream struct { name string records []TestRecord } var testsSubstreams = []struct { - from string - substreams []Substream + from string + substreams []Substream expectedSubstreams SubstreamsRecord - test string - ok bool + test string + ok bool }{ - {"",[]Substream{},SubstreamsRecord{[]SubstreamInfo{}}, "no substreams", true}, - {"",[]Substream{{"ss1",[]TestRecord{rec2,rec1}}},SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss1",Timestamp: 0}}}, "one substream", true}, - {"",[]Substream{{"ss1",[]TestRecord{rec2,rec1}},{"ss2",[]TestRecord{rec2,rec3}}},SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss1",Timestamp: 0},SubstreamInfo{Name: "ss2",Timestamp: 1}}}, "two substreams", true}, - {"ss2",[]Substream{{"ss1",[]TestRecord{rec1,rec2}},{"ss2",[]TestRecord{rec2,rec3}}},SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss2",Timestamp: 1}}}, "with from", true}, + {"", []Substream{}, SubstreamsRecord{[]SubstreamInfo{}}, "no substreams", true}, + {"", []Substream{{"ss1", []TestRecord{rec2, rec1}}}, SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss1", Timestamp: 0}}}, "one substream", true}, + {"", []Substream{{"ss1", []TestRecord{rec2, rec1}}, {"ss2", []TestRecord{rec2, rec3}}}, SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss1", Timestamp: 0}, SubstreamInfo{Name: "ss2", Timestamp: 1}}}, "two substreams", true}, + {"ss2", []Substream{{"ss1", []TestRecord{rec1, rec2}}, {"ss2", []TestRecord{rec2, rec3}}}, SubstreamsRecord{[]SubstreamInfo{SubstreamInfo{Name: "ss2", Timestamp: 1}}}, "with from", true}, } func 
TestMongoDBListSubstreams(t *testing.T) { for _, test := range testsSubstreams { db.Connect(dbaddress) for _, substream := range test.substreams { - for _,rec:= range substream.records { + for _, rec := range substream.records { db.insertRecord(dbname, substream.name, &rec) } } var rec_substreams_expect, _ = json.Marshal(test.expectedSubstreams) - res, err := db.ProcessRequest(dbname, "0", "0", "substreams", test.from) + + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: "0", Op: "substreams", ExtraParam: test.from}) if test.ok { assert.Nil(t, err, test.test) assert.Equal(t, string(rec_substreams_expect), string(res), test.test) @@ -695,50 +778,52 @@ func TestMongoDBAckImage(t *testing.T) { db.insertRecord(dbname, collection, &rec1) query_str := "{\"Id\":1,\"Op\":\"ackimage\"}" - res, err := db.ProcessRequest(dbname, collection, groupId, "ackimage", query_str) - nacks,_ := db.getNacks(dbname,collection,groupId,1,1) + + request := Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "ackimage", ExtraParam: query_str} + res, err := db.ProcessRequest(request) + nacks, _ := db.getNacks(request, 1, 1) assert.Nil(t, err) assert.Equal(t, "", string(res)) assert.Equal(t, 0, len(nacks)) } var testsNacs = []struct { - rangeString string - resString string + rangeString string + resString string insertRecords bool - ackRecords bool - ok bool - test string + ackRecords bool + ok bool + test string }{ - {"0_0", "{\"unacknowledged\":[1,2,3,4,5,6,7,8,9,10]}",true,false,true,"whole range"}, - {"", "{\"unacknowledged\":[1,2,3,4,5,6,7,8,9,10]}",true,false,false,"empty string range"}, - {"0_5", "{\"unacknowledged\":[1,2,3,4,5]}",true,false,true,"to given"}, - {"5_0", "{\"unacknowledged\":[5,6,7,8,9,10]}",true,false,true,"from given"}, - {"3_7", "{\"unacknowledged\":[3,4,5,6,7]}",true,false,true,"range given"}, - {"1_1", "{\"unacknowledged\":[1]}",true,false,true,"single record"}, - {"3_1", "{\"unacknowledged\":[]}",true,false,false,"to lt from"}, - {"0_0", "{\"unacknowledged\":[]}",false,false,true,"no records"}, - {"0_0", "{\"unacknowledged\":[1,5,6,7,8,9,10]}",true,true,true,"skip acks"}, - {"2_4", "{\"unacknowledged\":[]}",true,true,true,"all acknowledged"}, - {"1_4", "{\"unacknowledged\":[1]}",true,true,true,"some acknowledged"}, + {"0_0", "{\"unacknowledged\":[1,2,3,4,5,6,7,8,9,10]}", true, false, true, "whole range"}, + {"", "{\"unacknowledged\":[1,2,3,4,5,6,7,8,9,10]}", true, false, false, "empty string range"}, + {"0_5", "{\"unacknowledged\":[1,2,3,4,5]}", true, false, true, "to given"}, + {"5_0", "{\"unacknowledged\":[5,6,7,8,9,10]}", true, false, true, "from given"}, + {"3_7", "{\"unacknowledged\":[3,4,5,6,7]}", true, false, true, "range given"}, + {"1_1", "{\"unacknowledged\":[1]}", true, false, true, "single record"}, + {"3_1", "{\"unacknowledged\":[]}", true, false, false, "to lt from"}, + {"0_0", "{\"unacknowledged\":[]}", false, false, true, "no records"}, + {"0_0", "{\"unacknowledged\":[1,5,6,7,8,9,10]}", true, true, true, "skip acks"}, + {"2_4", "{\"unacknowledged\":[]}", true, true, true, "all acknowledged"}, + {"1_4", "{\"unacknowledged\":[1]}", true, true, true, "some acknowledged"}, } - func TestMongoDBNacks(t *testing.T) { for _, test := range testsNacs { db.Connect(dbaddress) - if test.insertRecords { + if test.insertRecords { insertRecords(10) } - if (test.ackRecords) { - db.ackRecord(dbname, collection, groupId,"{\"Id\":2,\"Op\":\"ackimage\"}") - db.ackRecord(dbname, collection, groupId,"{\"Id\":3,\"Op\":\"ackimage\"}") - 
db.ackRecord(dbname, collection, groupId,"{\"Id\":4,\"Op\":\"ackimage\"}") + if test.ackRecords { + db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackimage\"}"}) + db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackimage\"}"}) + db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackimage\"}"}) } - res, err := db.ProcessRequest(dbname, collection, groupId, "nacks", test.rangeString) + + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "nacks", ExtraParam: test.rangeString}) if test.ok { assert.Nil(t, err, test.test) - assert.Equal(t, test.resString, string(res),test.test) + assert.Equal(t, test.resString, string(res), test.test) } else { assert.NotNil(t, err, test.test) } @@ -748,43 +833,43 @@ func TestMongoDBNacks(t *testing.T) { var testsLastAcs = []struct { insertRecords bool - ackRecords bool - resString string - test string + ackRecords bool + resString string + test string }{ - {false,false,"{\"lastAckId\":0}","empty db"}, - {true,false,"{\"lastAckId\":0}","no acks"}, - {true,true,"{\"lastAckId\":4}","last ack 4"}, + {false, false, "{\"lastAckId\":0}", "empty db"}, + {true, false, "{\"lastAckId\":0}", "no acks"}, + {true, true, "{\"lastAckId\":4}", "last ack 4"}, } - func TestMongoDBLastAcks(t *testing.T) { for _, test := range testsLastAcs { db.Connect(dbaddress) - if test.insertRecords { + if test.insertRecords { insertRecords(10) } - if (test.ackRecords) { - db.ackRecord(dbname, collection, groupId,"{\"Id\":2,\"Op\":\"ackimage\"}") - db.ackRecord(dbname, collection, groupId,"{\"Id\":3,\"Op\":\"ackimage\"}") - db.ackRecord(dbname, collection, groupId,"{\"Id\":4,\"Op\":\"ackimage\"}") + if test.ackRecords { + db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":2,\"Op\":\"ackimage\"}"}) + db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":3,\"Op\":\"ackimage\"}"}) + db.ackRecord(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, ExtraParam: "{\"Id\":4,\"Op\":\"ackimage\"}"}) } - res, err := db.ProcessRequest(dbname, collection, groupId, "lastack", "") + + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "lastack"}) assert.Nil(t, err, test.test) - assert.Equal(t, test.resString, string(res),test.test) + assert.Equal(t, test.resString, string(res), test.test) cleanup() } } - func TestMongoDBGetNextUsesInprocessedImmedeatly(t *testing.T) { db.SetSettings(DBSettings{ReadFromInprocessPeriod: 0}) db.Connect(dbaddress) defer cleanup() err := db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec2) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "0_3") - res1, err1 := db.ProcessRequest(dbname, collection, groupId, "next", "0_3") + + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"}) + res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"}) assert.Nil(t, err) assert.Nil(t, err1) @@ -797,14 +882,14 @@ func TestMongoDBGetNextUsesInprocessedNumRetry(t *testing.T) { db.Connect(dbaddress) defer cleanup() err := db.insertRecord(dbname, collection, 
&rec1) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "0_1") - res1, err1 := db.ProcessRequest(dbname, collection, groupId, "next", "0_1") - _, err2 := db.ProcessRequest(dbname, collection, groupId, "next", "0_1") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"}) + res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"}) + _, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"}) assert.Nil(t, err) assert.Nil(t, err1) assert.NotNil(t, err2) - if err2!=nil { + if err2 != nil { assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err2.Error()) } assert.Equal(t, string(rec1_expect), string(res)) @@ -817,10 +902,10 @@ func TestMongoDBGetNextUsesInprocessedAfterTimeout(t *testing.T) { defer cleanup() err := db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec2) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "1_3") - res1, err1 := db.ProcessRequest(dbname, collection, groupId, "next", "1_3") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"}) + res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"}) time.Sleep(time.Second) - res2, err2 := db.ProcessRequest(dbname, collection, groupId, "next", "1_3") + res2, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"}) assert.Nil(t, err) assert.Nil(t, err1) assert.Nil(t, err2) @@ -835,10 +920,10 @@ func TestMongoDBGetNextReturnsToNormalAfterUsesInprocessed(t *testing.T) { defer cleanup() err := db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec2) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "1_3") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"}) time.Sleep(time.Second) - res1, err1 := db.ProcessRequest(dbname, collection, groupId, "next", "1_3") - res2, err2 := db.ProcessRequest(dbname, collection, groupId, "next", "1_3") + res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"}) + res2, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "1_3"}) assert.Nil(t, err) assert.Nil(t, err1) assert.Nil(t, err2) @@ -847,15 +932,14 @@ func TestMongoDBGetNextReturnsToNormalAfterUsesInprocessed(t *testing.T) { assert.Equal(t, string(rec2_expect), string(res2)) } - func TestMongoDBGetNextUsesInprocessedImmedeatlyIfFinishedStream(t *testing.T) { db.SetSettings(DBSettings{ReadFromInprocessPeriod: 10}) db.Connect(dbaddress) defer cleanup() err := db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec_finished) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "0_3") - res1, err1 := db.ProcessRequest(dbname, collection, groupId, "next", "0_3") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"}) + res1, err1 := db.ProcessRequest(Request{DbName: dbname, 
DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"}) assert.Nil(t, err) assert.Nil(t, err1) assert.Equal(t, string(rec1_expect), string(res)) @@ -868,9 +952,9 @@ func TestMongoDBGetNextUsesInprocessedImmedeatlyIfEndofStream(t *testing.T) { defer cleanup() err := db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec2) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "0_3") - res1, err1 := db.ProcessRequest(dbname, collection, groupId, "next", "0_3") - res2, err2 := db.ProcessRequest(dbname, collection, groupId, "next", "0_3") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"}) + res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"}) + res2, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"}) assert.Nil(t, err) assert.Nil(t, err1) assert.Nil(t, err2) @@ -884,43 +968,43 @@ func TestMongoDBAckDeletesInprocessed(t *testing.T) { db.Connect(dbaddress) defer cleanup() db.insertRecord(dbname, collection, &rec1) - db.ProcessRequest(dbname, collection, groupId, "next", "0_3") + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"}) query_str := "{\"Id\":1,\"Op\":\"ackimage\"}" - db.ProcessRequest(dbname, collection, groupId, "ackimage", query_str) - _, err := db.ProcessRequest(dbname, collection, groupId, "next", "0_3") + + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "ackimage", ExtraParam: query_str}) + _, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_3"}) assert.NotNil(t, err) - if err!=nil { + if err != nil { assert.Equal(t, utils.StatusNoData, err.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err.Error()) } } - func TestMongoDBNegAck(t *testing.T) { db.SetSettings(DBSettings{ReadFromInprocessPeriod: 0}) db.Connect(dbaddress) defer cleanup() inputParams := struct { - Id int + Id int Params struct { DelaySec int } }{} inputParams.Id = 1 - inputParams.Params.DelaySec=0 - + inputParams.Params.DelaySec = 0 db.insertRecord(dbname, collection, &rec1) - db.ProcessRequest(dbname, collection, groupId, "next", "") - bparam,_:= json.Marshal(&inputParams) - db.ProcessRequest(dbname, collection, groupId, "negackimage", string(bparam)) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "") // first time image from negack - _, err1 := db.ProcessRequest(dbname, collection, groupId, "next", "") // second time nothing + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) + bparam, _ := json.Marshal(&inputParams) + + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "negackimage", ExtraParam: string(bparam)}) + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) // first time image from negack + _, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next"}) // second time nothing assert.Nil(t, err) assert.Equal(t, string(rec1_expect), string(res)) assert.NotNil(t, err1) - if err1!=nil { + if err1 != nil { assert.Equal(t, 
utils.StatusNoData, err1.(*DBError).Code) assert.Equal(t, "{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}", err1.Error()) } @@ -932,11 +1016,12 @@ func TestMongoDBGetNextClearsInprocessAfterReset(t *testing.T) { defer cleanup() err := db.insertRecord(dbname, collection, &rec1) db.insertRecord(dbname, collection, &rec2) - res, err := db.ProcessRequest(dbname, collection, groupId, "next", "0_1") - res1, err1 := db.ProcessRequest(dbname, collection, groupId, "next", "0_1") - db.ProcessRequest(dbname, collection, groupId, "resetcounter", "0") - res2, err2 := db.ProcessRequest(dbname, collection, groupId, "next", "0_1") - res3, err3 := db.ProcessRequest(dbname, collection, groupId, "next", "0_1") + res, err := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"}) + res1, err1 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"}) + + db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "resetcounter", ExtraParam: "0"}) + res2, err2 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"}) + res3, err3 := db.ProcessRequest(Request{DbName: dbname, DbCollectionName: collection, GroupId: groupId, Op: "next", ExtraParam: "0_1"}) assert.Nil(t, err) assert.Nil(t, err1) @@ -946,4 +1031,4 @@ func TestMongoDBGetNextClearsInprocessAfterReset(t *testing.T) { assert.Equal(t, string(rec1_expect), string(res1)) assert.Equal(t, string(rec1_expect), string(res2)) assert.Equal(t, string(rec1_expect), string(res3)) -} \ No newline at end of file +} diff --git a/broker/src/asapo_broker/server/get_commands_test.go b/broker/src/asapo_broker/server/get_commands_test.go index c0a4eba12b086ee9be0ecc32efc2795ae6afef28..2f2f9fadba8fea7297b6d555c07a28d9c33952f5 100644 --- a/broker/src/asapo_broker/server/get_commands_test.go +++ b/broker/src/asapo_broker/server/get_commands_test.go @@ -40,8 +40,8 @@ var testsGetCommand = []struct { queryParams string externalParam string }{ - {"last", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/last","","0"}, - {"id", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/1","","1"}, + {"last", expectedSubstream, "", expectedSubstream + "/0/last","","0"}, + {"id", expectedSubstream, "", expectedSubstream + "/0/1","","1"}, {"meta", "default", "", "default/0/meta/0","","0"}, {"nacks", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/nacks","","0_0"}, {"next", expectedSubstream, expectedGroupID, expectedSubstream + "/" + expectedGroupID + "/next","",""}, @@ -56,7 +56,7 @@ var testsGetCommand = []struct { func (suite *GetCommandsTestSuite) TestGetCommandsCallsCorrectRoutine() { for _, test := range testsGetCommand { - suite.mock_db.On("ProcessRequest", expectedDBName, test.substream, test.groupid, test.command, test.externalParam).Return([]byte("Hello"), nil) + suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: test.substream, GroupId: test.groupid, Op: test.command, ExtraParam: test.externalParam}).Return([]byte("Hello"), nil) logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request "+test.command))) w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + test.reqString+correctTokenSuffix+test.queryParams) suite.Equal(http.StatusOK, 
w.Code, test.command+ " OK") diff --git a/broker/src/asapo_broker/server/get_id.go b/broker/src/asapo_broker/server/get_id.go index 5eea8aea38e8e03ebde445266fa4c5f326a9c805..34ddb643e9b2b12e7b8e848a01b6770766d2c37f 100644 --- a/broker/src/asapo_broker/server/get_id.go +++ b/broker/src/asapo_broker/server/get_id.go @@ -20,5 +20,5 @@ func routeGetByID(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) return } - processRequest(w, r, "id", id, true) + processRequest(w, r, "id", id, false) } diff --git a/broker/src/asapo_broker/server/get_last.go b/broker/src/asapo_broker/server/get_last.go index 5b0bcd616547aeec3f119fa8f7b5e40186111265..44a38b99459c16f94be9c258bcd736a6715ee30a 100644 --- a/broker/src/asapo_broker/server/get_last.go +++ b/broker/src/asapo_broker/server/get_last.go @@ -5,5 +5,5 @@ import ( ) func routeGetLast(w http.ResponseWriter, r *http.Request) { - processRequest(w, r, "last", "0", true) + processRequest(w, r, "last", "0", false) } diff --git a/broker/src/asapo_broker/server/get_meta_test.go b/broker/src/asapo_broker/server/get_meta_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4e305ea3e7077135002a2403603c078678a4bd76 --- /dev/null +++ b/broker/src/asapo_broker/server/get_meta_test.go @@ -0,0 +1,41 @@ +package server + +import ( + "asapo_broker/database" + "asapo_common/logger" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + "net/http" + "testing" +) + +type GetMetaTestSuite struct { + suite.Suite + mock_db *database.MockedDatabase +} + +func (suite *GetMetaTestSuite) SetupTest() { + statistics.Reset() + suite.mock_db = new(database.MockedDatabase) + db = suite.mock_db + prepareTestAuth() + logger.SetMockLog() +} + +func (suite *GetMetaTestSuite) TearDownTest() { + assertExpectations(suite.T(), suite.mock_db) + logger.UnsetMockLog() + db = nil +} + +func TestGetMetaTestSuite(t *testing.T) { + suite.Run(t, new(GetMetaTestSuite)) +} + +func (suite *GetMetaTestSuite) TestGetMetaOK() { + suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, Op: "meta", ExtraParam: "1"}).Return([]byte(""), nil) + logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request meta"))) + w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/0/meta" + "/1" + correctTokenSuffix,"GET") + suite.Equal(http.StatusOK, w.Code, "meta OK") +} + diff --git a/broker/src/asapo_broker/server/get_next.go b/broker/src/asapo_broker/server/get_next.go index 8297dc97322cb60595b9fbb82bd3a5c046d1d986..9e588fb9a73da679beffef9598aa9aa8d5df61d1 100644 --- a/broker/src/asapo_broker/server/get_next.go +++ b/broker/src/asapo_broker/server/get_next.go @@ -4,8 +4,6 @@ import ( "net/http" ) - - func extractResend(r *http.Request) (string) { keys := r.URL.Query() resend := keys.Get("resend_nacks") @@ -18,7 +16,6 @@ func extractResend(r *http.Request) (string) { return resend_params } - func routeGetNext(w http.ResponseWriter, r *http.Request) { processRequest(w, r, "next", extractResend(r), true) } diff --git a/broker/src/asapo_broker/server/listroutes.go b/broker/src/asapo_broker/server/listroutes.go index 24ceab6e63cc13ec63e5ecfa107a387184f3d136..f971d71994e0ba718f383dd41d7fb65c351fc331 100644 --- a/broker/src/asapo_broker/server/listroutes.go +++ b/broker/src/asapo_broker/server/listroutes.go @@ -26,7 +26,7 @@ var listRoutes = utils.Routes{ utils.Route{ "GetLast", "Get", - 
"/database/{dbname}/{stream}/{substream}/{groupid}/last", + "/database/{dbname}/{stream}/{substream}/0/last", routeGetLast, }, utils.Route{ @@ -44,7 +44,7 @@ var listRoutes = utils.Routes{ utils.Route{ "GetID", "Get", - "/database/{dbname}/{stream}/{substream}/{groupid}/{id}", + "/database/{dbname}/{stream}/{substream}/0/{id}", routeGetByID, }, utils.Route{ diff --git a/broker/src/asapo_broker/server/post_op_image_test.go b/broker/src/asapo_broker/server/post_op_image_test.go index fbba23069496813d34f5aae1ca30cd5387ac3c45..94fdf49f6057390156cd8656feb93ce5ab4ea6f8 100644 --- a/broker/src/asapo_broker/server/post_op_image_test.go +++ b/broker/src/asapo_broker/server/post_op_image_test.go @@ -34,8 +34,21 @@ func TestImageOpTestSuite(t *testing.T) { func (suite *ImageOpTestSuite) TestAckImageOpOK() { query_str := "{\"Id\":1,\"Op\":\"ackimage\"}" - suite.mock_db.On("ProcessRequest", expectedDBName, expectedSubstream, expectedGroupID, "ackimage", query_str).Return([]byte(""), nil) + suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId: expectedGroupID, Op: "ackimage", ExtraParam: query_str}).Return([]byte(""), nil) logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request ackimage"))) w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str) suite.Equal(http.StatusOK, w.Code, "ackimage OK") } + + +func (suite *ImageOpTestSuite) TestAckImageOpErrorWrongOp() { + query_str := "\"Id\":1,\"Op\":\"ackimage\"}" + w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/1" + correctTokenSuffix,"POST",query_str) + suite.Equal(http.StatusBadRequest, w.Code, "ackimage wrong") +} + +func (suite *ImageOpTestSuite) TestAckImageOpErrorWrongID() { + query_str := "{\"Id\":1,\"Op\":\"ackimage\"}" + w := doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/bla" + correctTokenSuffix,"POST",query_str) + suite.Equal(http.StatusBadRequest, w.Code, "ackimage wrong") +} diff --git a/broker/src/asapo_broker/server/post_query_images_test.go b/broker/src/asapo_broker/server/post_query_images_test.go index 51ed9c45fd32814564fb87ccf9fc17b783897a2e..5ac71bca05115779607015c7c1c604c996c20515 100644 --- a/broker/src/asapo_broker/server/post_query_images_test.go +++ b/broker/src/asapo_broker/server/post_query_images_test.go @@ -34,9 +34,11 @@ func TestQueryTestSuite(t *testing.T) { func (suite *QueryTestSuite) TestQueryOK() { query_str := "aaaa" - suite.mock_db.On("ProcessRequest", expectedDBName, expectedSubstream, "", "queryimages", query_str).Return([]byte("{}"), nil) + + suite.mock_db.On("ProcessRequest", database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream,Op: "queryimages", ExtraParam: query_str}).Return([]byte("{}"), nil) logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request queryimages"))) w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedStream+"/"+expectedSubstream+"/0/queryimages"+correctTokenSuffix, "POST", query_str) suite.Equal(http.StatusOK, w.Code, "Query OK") } + diff --git a/broker/src/asapo_broker/server/post_reset_counter_test.go b/broker/src/asapo_broker/server/post_reset_counter_test.go index bb2f2b2a224ea666e0c543046dd4264f2539ad72..d35f116a15d063dc6be8264f59ac0468b54c3ee1 100644 --- 
a/broker/src/asapo_broker/server/post_reset_counter_test.go +++ b/broker/src/asapo_broker/server/post_reset_counter_test.go @@ -33,7 +33,9 @@ func TestResetCounterTestSuite(t *testing.T) { } func (suite *ResetCounterTestSuite) TestResetCounterOK() { - suite.mock_db.On("ProcessRequest", expectedDBName, expectedSubstream, expectedGroupID, "resetcounter", "10").Return([]byte(""), nil) + expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "resetcounter", ExtraParam: "10"} + suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), nil) + logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request resetcounter"))) w := doRequest("/database/"+expectedBeamtimeId+"/"+expectedStream+"/"+expectedSubstream+"/"+expectedGroupID+"/resetcounter"+correctTokenSuffix+"&value=10", "POST") diff --git a/broker/src/asapo_broker/server/process_request.go b/broker/src/asapo_broker/server/process_request.go index d65dce7748b43c61ac255e9df1f18f100752bf7c..4adf102b6d48b8319780b1af186122251a45bdc9 100644 --- a/broker/src/asapo_broker/server/process_request.go +++ b/broker/src/asapo_broker/server/process_request.go @@ -24,8 +24,6 @@ func extractRequestParameters(r *http.Request, needGroupID bool) (string, string return db_name, stream, substream, group_id, ok1 && ok2 && ok3 && ok4 } -var Sink bool - func IsLetterOrNumbers(s string) bool { for _, r := range s { if (r < 'a' || r > 'z') && (r < 'A' || r > 'Z') && (r<'0' || r>'9') { @@ -69,27 +67,25 @@ func processRequest(w http.ResponseWriter, r *http.Request, op string, extra_par return } - if datasetRequested(r) { - op = op + "_dataset" + request := database.Request{} + request.DbName = db_name+"_"+stream + request.Op = op + request.ExtraParam = extra_param + request.DbCollectionName = substream + request.GroupId = group_id + if yes, minSize := datasetRequested(r); yes { + request.DatasetOp = true + request.MinDatasetSize = minSize } - answer, code := processRequestInDb(db_name+"_"+stream, substream, group_id, op, extra_param) + answer, code := processRequestInDb(request) w.WriteHeader(code) w.Write(answer) } -func getStatusCodeFromDbError(err error) int { - err_db, ok := err.(*database.DBError) - if ok { - return err_db.Code - } else { - return utils.StatusServiceUnavailable - } -} - func returnError(err error, log_str string) (answer []byte, code int) { - code = getStatusCodeFromDbError(err) - if code != utils.StatusNoData { + code = database.GetStatusCodeFromError(err) + if code != utils.StatusNoData && code != utils.StatusPartialData{ logger.Error(log_str + " - " + err.Error()) } else { logger.Debug(log_str + " - " + err.Error()) @@ -98,7 +94,7 @@ func returnError(err error, log_str string) (answer []byte, code int) { } func reconnectIfNeeded(db_error error) { - code := getStatusCodeFromDbError(db_error) + code := database.GetStatusCodeFromError(db_error) if code != utils.StatusServiceUnavailable { return } @@ -110,10 +106,10 @@ func reconnectIfNeeded(db_error error) { } } -func processRequestInDb(db_name string, data_collection_name string, group_id string, op string, extra_param string) (answer []byte, code int) { +func processRequestInDb(request database.Request) (answer []byte, code int) { statistics.IncreaseCounter() - answer, err := db.ProcessRequest(db_name, data_collection_name, group_id, op, extra_param) - log_str := "processing request " + op + " in " + db_name + " at " + settings.GetDatabaseServer() + answer, err := db.ProcessRequest(request) + 
log_str := "processing request " + request.Op + " in " + request.DbName + " at " + settings.GetDatabaseServer() if err != nil { go reconnectIfNeeded(err) return returnError(err, log_str) diff --git a/broker/src/asapo_broker/server/process_request_test.go b/broker/src/asapo_broker/server/process_request_test.go index 1757decba923ba6e98e2b107b2f7b6c3f168b4f1..5aa7b28fc2a622dc88a6ec6f0c02c1951517c233 100644 --- a/broker/src/asapo_broker/server/process_request_test.go +++ b/broker/src/asapo_broker/server/process_request_test.go @@ -123,7 +123,10 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithNoToken() { } func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongDatabaseName() { - suite.mock_db.On("ProcessRequest", expectedDBName, expectedSubstream, expectedGroupID, "next", "").Return([]byte(""), + + expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"} + + suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), &database.DBError{utils.StatusNoData, ""}) logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next"))) @@ -134,7 +137,10 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithWrongDatabaseName() } func (suite *ProcessRequestTestSuite) TestProcessRequestWithConnectionError() { - suite.mock_db.On("ProcessRequest", expectedDBName, expectedSubstream, expectedGroupID, "next", "").Return([]byte(""), + + expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"} + + suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), &database.DBError{utils.StatusServiceUnavailable, ""}) logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("processing request next"))) @@ -147,7 +153,11 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithConnectionError() { } func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() { - suite.mock_db.On("ProcessRequest", expectedDBName, expectedSubstream, expectedGroupID, "next", "").Return([]byte(""), errors.New("")) + + expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"} + + + suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte(""), errors.New("")) logger.MockLog.On("Error", mock.MatchedBy(containsMatcher("processing request next"))) logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("reconnected"))) @@ -159,7 +169,11 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWithInternalDBError() { } func (suite *ProcessRequestTestSuite) TestProcessRequestAddsCounter() { - suite.mock_db.On("ProcessRequest", expectedDBName, expectedSubstream, expectedGroupID, "next", "").Return([]byte("Hello"), nil) + + expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId:expectedGroupID, Op: "next"} + suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte("Hello"), nil) + + logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next in "+expectedDBName))) doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix) @@ -173,8 +187,11 @@ func (suite *ProcessRequestTestSuite) TestProcessRequestWrongGroupID() { } func (suite *ProcessRequestTestSuite) TestProcessRequestAddsDataset() { - suite.mock_db.On("ProcessRequest", expectedDBName, 
expectedSubstream, expectedGroupID, "next_dataset", "").Return([]byte("Hello"), nil) - logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next_dataset in "+expectedDBName))) + + expectedRequest := database.Request{DbName: expectedDBName, DbCollectionName: expectedSubstream, GroupId: expectedGroupID, DatasetOp: true, Op: "next"} + suite.mock_db.On("ProcessRequest", expectedRequest).Return([]byte("Hello"), nil) + + logger.MockLog.On("Debug", mock.MatchedBy(containsMatcher("processing request next in "+expectedDBName))) doRequest("/database/" + expectedBeamtimeId + "/" + expectedStream + "/" + expectedSubstream + "/" + expectedGroupID + "/next" + correctTokenSuffix + "&dataset=true") } diff --git a/broker/src/asapo_broker/server/request_common.go b/broker/src/asapo_broker/server/request_common.go index b8314701084a70177c0f762c0f32ca90c8b12993..28f2bc38d729c1cc14cafc6e696bdf71cfc9ccd8 100644 --- a/broker/src/asapo_broker/server/request_common.go +++ b/broker/src/asapo_broker/server/request_common.go @@ -4,6 +4,7 @@ import ( "asapo_common/logger" "errors" "net/http" + "strconv" ) func writeAuthAnswer(w http.ResponseWriter, requestName string, db_name string, err string) { @@ -13,7 +14,7 @@ func writeAuthAnswer(w http.ResponseWriter, requestName string, db_name string, w.Write([]byte(err)) } -func ValueTrue(r *http.Request, key string) bool { +func valueTrue(r *http.Request, key string) bool { val := r.URL.Query().Get(key) if len(val) == 0 { return false @@ -25,8 +26,21 @@ func ValueTrue(r *http.Request, key string) bool { return false } -func datasetRequested(r *http.Request) bool { - return ValueTrue(r, "dataset") +func valueInt(r *http.Request, key string) int { + val := r.URL.Query().Get(key) + if len(val) == 0 { + return 0 + } + + i, err := strconv.Atoi(val) + if err != nil { + return 0 + } + return i +} + +func datasetRequested(r *http.Request) (bool, int) { + return valueTrue(r, "dataset"), valueInt(r, "minsize") } func testAuth(r *http.Request, beamtime_id string) error { diff --git a/broker/src/asapo_broker/server/server_test.go b/broker/src/asapo_broker/server/server_test.go index d1c9957df9504d5ad8deb6b6bc37c4136edc16ef..3a7243782c43ca7e5aaae7c7a98f638c547c7ae3 100644 --- a/broker/src/asapo_broker/server/server_test.go +++ b/broker/src/asapo_broker/server/server_test.go @@ -117,6 +117,13 @@ func TestReconnectDB(t *testing.T) { db = nil } +func TestErrorWhenReconnectNotConnectedDB(t *testing.T) { + err := ReconnectDb() + assert.NotNil(t, err, "error reconnect") + db = nil +} + + func TestCleanupDBWithoutInit(t *testing.T) { mock_db := setup() diff --git a/broker/src/asapo_broker/server/statistics.go b/broker/src/asapo_broker/server/statistics.go index 13b0103306968508b6c3141de9733d9d3cea7066..b5b8ce09d63d139fb78f9f3e1de62638bdd179e7 100644 --- a/broker/src/asapo_broker/server/statistics.go +++ b/broker/src/asapo_broker/server/statistics.go @@ -4,7 +4,6 @@ import ( log "asapo_common/logger" "fmt" "sync" - "time" ) type statisticsWriter interface { @@ -54,14 +53,3 @@ func (st *serverStatistics) WriteStatistic() (err error) { }() return st.Writer.Write(st) } - -func (st *serverStatistics) Monitor() { - for { - time.Sleep(1000 * time.Millisecond) - if err := st.WriteStatistic(); err != nil { - logstr := "sending statistics to " + settings.PerformanceDbServer + ", dbname: " + settings.PerformanceDbName - log.Error(logstr + " - " + err.Error()) - } - st.Reset() - } -}
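Before the new statistics_nottested.go file below, note the query-string contract established by the request_common.go changes above: datasetRequested now reports both whether dataset delivery was requested and the minimum acceptable dataset size. A short sketch of how these values flow onward, assuming the broker handler context shown in this patch (the function exampleDatasetParams and the URL are illustrative only):

```go
// Illustrative only, not part of the patch. For a request such as
//   GET /database/bt/stream/sub/group/next?token=...&dataset=true&minsize=2
// the helpers above produce the values that processRequest copies
// into database.Request.
func exampleDatasetParams(r *http.Request) (bool, int) {
	dataset, minSize := datasetRequested(r) // -> true, 2 for the URL above
	// processRequest then sets:
	//   request.DatasetOp = dataset
	//   request.MinDatasetSize = minSize
	// valueInt maps an absent or non-numeric minsize to 0, which the database
	// layer reads as "complete datasets only, otherwise a partial-data error"
	return dataset, minSize
}
```

diff --git a/broker/src/asapo_broker/server/statistics_nottested.go 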
b/broker/src/asapo_broker/server/statistics_nottested.go new file mode 100644 index 0000000000000000000000000000000000000000..c2728623d93b161305d5f9285cea9af751350466 --- /dev/null +++ b/broker/src/asapo_broker/server/statistics_nottested.go @@ -0,0 +1,19 @@ +//+build !test + +package server + +import ( + log "asapo_common/logger" + "time" +) + +func (st *serverStatistics) Monitor() { + for { + time.Sleep(1000 * time.Millisecond) + if err := st.WriteStatistic(); err != nil { + logstr := "sending statistics to " + settings.PerformanceDbServer + ", dbname: " + settings.PerformanceDbName + log.Error(logstr + " - " + err.Error()) + } + st.Reset() + } +} diff --git a/broker/src/asapo_broker/server/statistics_test.go b/broker/src/asapo_broker/server/statistics_test.go index 147c1ff90376ac630f69b3f076d21684e320bcf0..47cfa6155ae160183947d2534036280e8edccc4a 100644 --- a/broker/src/asapo_broker/server/statistics_test.go +++ b/broker/src/asapo_broker/server/statistics_test.go @@ -1,6 +1,7 @@ package server import ( + "errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "testing" @@ -28,17 +29,37 @@ func assertMockWriterExpectations(t *testing.T, mock_writer *mockWriter) { func TestWriteStatisticsOK(t *testing.T) { mock_writer := new(mockWriter) statistics.Writer = mock_writer - statistics.Reset() - statistics.IncreaseCounter() + mock_writer.On("Init").Return(nil) mock_writer.On("Write", &statistics).Return(nil) + statistics.Init() + statistics.Reset() + statistics.IncreaseCounter() + counter := statistics.GetCounter() + err := statistics.WriteStatistic() assert.Nil(t, err, "Statistics written") + assert.Equal(t, 1, counter, "counter") + + assertMockWriterExpectations(t, mock_writer) +} + +func TestInitError(t *testing.T) { + mock_writer := new(mockWriter) + statistics.Writer = mock_writer + + mock_writer.On("Init").Return(errors.New("error")) + + statistics.Init() + + err := statistics.WriteStatistic() + assert.NotNil(t, err, "Statistics init error") assertMockWriterExpectations(t, mock_writer) } + func TestWriteStatisticsCatchesError(t *testing.T) { statistics.Writer = nil diff --git a/common/cpp/include/common/data_structs.h b/common/cpp/include/common/data_structs.h index b44afb9833e37b0db1177edbda611999d3923da0..c17921eb780068ba8ba2c85c996e62f16457baf8 100644 --- a/common/cpp/include/common/data_structs.h +++ b/common/cpp/include/common/data_structs.h @@ -64,6 +64,7 @@ using IdList = std::vector<uint64_t>; struct DataSet { uint64_t id; + uint64_t expected_size; FileInfos content; bool SetFromJson(const std::string& json_string); }; diff --git a/common/cpp/src/data_structs/data_structs.cpp b/common/cpp/src/data_structs/data_structs.cpp index 72cb5fddfb191bae1905b96a3f8464fe3e541d01..65c2c4cebc0c4da797c128bb3896d035b9302fed 100644 --- a/common/cpp/src/data_structs/data_structs.cpp +++ b/common/cpp/src/data_structs/data_structs.cpp @@ -92,6 +92,7 @@ bool DataSet::SetFromJson(const std::string &json_string) { std::vector<std::string> vec_fi_endcoded; Error parse_err; (parse_err = parser.GetArrayRawStrings("images", &vec_fi_endcoded)) || + (parse_err = parser.GetUInt64("size", &expected_size)) || (parse_err = parser.GetUInt64("_id", &id)); if (parse_err) { *this = old; diff --git a/common/cpp/src/database/mongodb_client.cpp b/common/cpp/src/database/mongodb_client.cpp index a2ba00c32ea1b19b6a5394e922da166fe6a1bee3..ab20bd80aec5855814a3670b54edea2edb3a8f0d 100644 --- a/common/cpp/src/database/mongodb_client.cpp +++ b/common/cpp/src/database/mongodb_client.cpp @@ -335,9 
+335,9 @@ Error MongoDBClient::GetById(const std::string &collection, uint64_t id, FileInf return nullptr; } -Error MongoDBClient::GetDataSetById(const std::string &collection, uint64_t set_id, uint64_t id, FileInfo* file) const { +Error MongoDBClient::GetDataSetById(const std::string &collection, uint64_t id_in_set, uint64_t id, FileInfo* file) const { std::string record_str; - auto err = GetRecordFromDb(collection, set_id, GetRecordMode::kById, &record_str); + auto err = GetRecordFromDb(collection, id, GetRecordMode::kById, &record_str); if (err) { return err; } @@ -348,7 +348,7 @@ Error MongoDBClient::GetDataSetById(const std::string &collection, uint64_t set_ } for (const auto &fileinfo : dataset.content) { - if (fileinfo.id == id) { + if (fileinfo.id == id_in_set) { *file = fileinfo; return nullptr; } diff --git a/common/cpp/src/database/mongodb_client.h b/common/cpp/src/database/mongodb_client.h index 8c15a025417262570f35127717664ca09f439475..f06b4f9479e33a6b54ef9c7d497e2b1f3c2e6b8d 100644 --- a/common/cpp/src/database/mongodb_client.h +++ b/common/cpp/src/database/mongodb_client.h @@ -48,7 +48,7 @@ class MongoDBClient final : public Database { bool ignore_duplicates) const override; Error Upsert(const std::string& collection, uint64_t id, const uint8_t* data, uint64_t size) const override; Error GetById(const std::string& collection, uint64_t id, FileInfo* file) const override; - Error GetDataSetById(const std::string& collection, uint64_t set_id, uint64_t id, FileInfo* file) const override; + Error GetDataSetById(const std::string& collection, uint64_t id_in_set, uint64_t id, FileInfo* file) const override; Error GetStreamInfo(const std::string& collection, StreamInfo* info) const override; Error GetLastStream(StreamInfo* info) const override; ~MongoDBClient() override; diff --git a/common/go/src/asapo_common/utils/helpers.go b/common/go/src/asapo_common/utils/helpers.go index 94f0fdfa695044120d6c7c19ef334d0c102a1fa8..9b7dc20936f9c4da8d56825b2cafcc2ea317dcf9 100644 --- a/common/go/src/asapo_common/utils/helpers.go +++ b/common/go/src/asapo_common/utils/helpers.go @@ -25,6 +25,16 @@ func MapToJson(res interface{}) ([]byte, error) { } } +func InterfaceToInt64(val interface{}) (int64, bool) { + val64, ok := val.(int64) + var valf64 float64 + if !ok { // we need this (at least for tests) since by default values are float in mongo + valf64, ok = val.(float64) + val64 = int64(valf64) + } + return val64, ok +} + func ReadJsonFromFile(fname string, config interface{}) error { content, err := ioutil.ReadFile(fname) if err != nil { diff --git a/common/go/src/asapo_common/utils/status_codes.go b/common/go/src/asapo_common/utils/status_codes.go index 9f6e061622fe87e82f779e6bc871cca099b29dc9..7002a963e250b20b9af88c1125b54c2993215aca 100644 --- a/common/go/src/asapo_common/utils/status_codes.go +++ b/common/go/src/asapo_common/utils/status_codes.go @@ -9,7 +9,8 @@ const ( const ( //error codes StatusTransactionInterrupted = http.StatusInternalServerError - StatusServiceUnavailable = http.StatusNotFound + StatusServiceUnavailable = http.StatusNotFound StatusWrongInput = http.StatusBadRequest StatusNoData = http.StatusConflict + StatusPartialData = http.StatusPartialContent ) diff --git a/consumer/api/cpp/include/consumer/consumer_error.h b/consumer/api/cpp/include/consumer/consumer_error.h index 2cb3929c5054416000bd109d952e869bcfd6e83f..25531f196ccf0830b3042879a77abe05d17241ef 100644 --- a/consumer/api/cpp/include/consumer/consumer_error.h +++ b/consumer/api/cpp/include/consumer/consumer_error.h 
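Two small utils changes above carry much of the new protocol: StatusPartialData maps the partial-data condition onto HTTP 206 Partial Content (which the consumer changes further below translate into kPartialData), and InterfaceToInt64 absorbs the fact that numeric fields may surface as float64 rather than int64, as its in-code comment notes for the tests. A minimal, self-contained sketch of the latter; the main function and the sample JSON are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"

	"asapo_common/utils"
)

func main() {
	var doc map[string]interface{}
	// encoding/json decodes every JSON number into float64 ...
	json.Unmarshal([]byte(`{"size": 3}`), &doc)
	// ... and InterfaceToInt64 still recovers the integer value
	size, ok := utils.InterfaceToInt64(doc["size"])
	fmt.Println(size, ok) // prints: 3 true
}
```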
@@ -13,12 +13,19 @@ enum class ConsumerErrorType { kUnavailableService, kInterruptedTransaction, kLocalIOError, - kWrongInput + kWrongInput, + kPartialData }; using ConsumerErrorTemplate = ServiceErrorTemplate<ConsumerErrorType, ErrorType::kConsumerError>; +class PartialErrorData : public CustomErrorData { + public: + uint64_t id; + uint64_t expected_size; +}; + class ConsumerErrorData : public CustomErrorData { public: uint64_t id; @@ -29,6 +36,13 @@ class ConsumerErrorData : public CustomErrorData { namespace ConsumerErrorTemplates { + +auto const kPartialData = ConsumerErrorTemplate { + "partial data", ConsumerErrorType::kPartialData +}; + + + auto const kLocalIOError = ConsumerErrorTemplate { "local i/o error", ConsumerErrorType::kLocalIOError }; diff --git a/consumer/api/cpp/include/consumer/data_broker.h b/consumer/api/cpp/include/consumer/data_broker.h index e76881e85a2419b48257f042006cf7dfb4afc850..e82175856fd94de1775fa663daf3f2f22955d180 100644 --- a/consumer/api/cpp/include/consumer/data_broker.h +++ b/consumer/api/cpp/include/consumer/data_broker.h @@ -122,29 +122,33 @@ class DataBroker { /*! \param err - will be set to error data cannot be read, nullptr otherwise. \param group_id - group id to use. + \param substream - substream to use ("" for default). + \param min_size - wait until dataset has min_size data tuples (0 for maximum size) \return DataSet - information about the dataset - */ - virtual DataSet GetNextDataset(std::string group_id, Error* err) = 0; - virtual DataSet GetNextDataset(std::string group_id, std::string substream, Error* err) = 0; - //! Receive last available completed dataset. + */ + virtual DataSet GetNextDataset(std::string group_id, std::string substream, uint64_t min_size, Error* err) = 0; + virtual DataSet GetNextDataset(std::string group_id, uint64_t min_size, Error* err) = 0; + //! Receive last available dataset which has min_size data tuples. /*! \param err - will be set to error data cannot be read, nullptr otherwise. - \param group_id - group id to use. + \param substream - substream to use ("" for default). + \param min_size - amount of data tuples in dataset (0 for maximum size) \return DataSet - information about the dataset */ - virtual DataSet GetLastDataset(std::string group_id, Error* err) = 0; - virtual DataSet GetLastDataset(std::string group_id, std::string substream, Error* err) = 0; + virtual DataSet GetLastDataset(std::string substream, uint64_t min_size, Error* err) = 0; + virtual DataSet GetLastDataset(uint64_t min_size, Error* err) = 0; //! Receive dataset by id. /*! \param id - dataset id - \param err - will be set to error data cannot be read or dataset is incomplete, nullptr otherwise. - \param group_id - group id to use. + \param err - will be set to error data cannot be read or dataset size less than min_size, nullptr otherwise. + \param substream - substream to use ("" for default). + \param min_size - wait until dataset has min_size data tuples (0 for maximum size) \return DataSet - information about the dataset */ - virtual DataSet GetDatasetById(uint64_t id, std::string group_id, Error* err) = 0; - virtual DataSet GetDatasetById(uint64_t id, std::string group_id, std::string substream, Error* err) = 0; + virtual DataSet GetDatasetById(uint64_t id, std::string substream, uint64_t min_size, Error* err) = 0; + virtual DataSet GetDatasetById(uint64_t id, uint64_t min_size, Error* err) = 0; //! Receive single image by id. /*! @@ -153,8 +157,8 @@ class DataBroker { \param data - where to store image data. 
Can be set to nullptr only image metadata is needed. \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise. */ - virtual Error GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) = 0; - virtual Error GetById(uint64_t id, FileInfo* info, std::string group_id, std::string substream, FileData* data) = 0; + virtual Error GetById(uint64_t id, FileInfo* info, FileData* data) = 0; + virtual Error GetById(uint64_t id, FileInfo* info, std::string substream, FileData* data) = 0; //! Receive id of last acknowledged data tuple /*! @@ -169,12 +173,11 @@ class DataBroker { //! Receive last available image. /*! \param info - where to store image metadata. Can be set to nullptr only image data is needed. - \param group_id - group id to use. \param data - where to store image data. Can be set to nullptr only image metadata is needed. \return Error if both pointers are nullptr or data cannot be read, nullptr otherwise. */ - virtual Error GetLast(FileInfo* info, std::string group_id, FileData* data) = 0; - virtual Error GetLast(FileInfo* info, std::string group_id, std::string substream, FileData* data) = 0; + virtual Error GetLast(FileInfo* info, FileData* data) = 0; + virtual Error GetLast(FileInfo* info, std::string substream, FileData* data) = 0; //! Get all images matching the query. /*! diff --git a/consumer/api/cpp/src/server_data_broker.cpp b/consumer/api/cpp/src/server_data_broker.cpp index 017f89e2672b0d15508e432d97a3ccb1c726bc00..0d9519cf0719963a5e8c2c8d87c9d725b56f2c96 100644 --- a/consumer/api/cpp/src/server_data_broker.cpp +++ b/consumer/api/cpp/src/server_data_broker.cpp @@ -19,17 +19,42 @@ namespace asapo { const std::string ServerDataBroker::kBrokerServiceName = "asapo-broker"; const std::string ServerDataBroker::kFileTransferServiceName = "asapo-file-transfer"; -Error GetNoDataResponseFromJson(const std::string& json_string, ConsumerErrorData* data) { +Error GetNoDataResponseFromJson(const std::string &json_string, ConsumerErrorData* data) { JsonStringParser parser(json_string); Error err; if ((err = parser.GetUInt64("id", &data->id)) || (err = parser.GetUInt64("id_max", &data->id_max)) - || (err = parser.GetString("next_substream", &data->next_substream))) { + || (err = parser.GetString("next_substream", &data->next_substream))) { return err; } return nullptr; } -Error ConsumerErrorFromNoDataResponse(const std::string& response) { +Error GetPartialDataResponseFromJson(const std::string &json_string, PartialErrorData* data) { + Error err; + auto parser = JsonStringParser(json_string); + uint64_t id,size; + if ((err = parser.GetUInt64("size", &size)) || + (err = parser.GetUInt64("_id", &id))) { + return err; + } + data->id = id; + data->expected_size = size; + return nullptr; +} + +Error ConsumerErrorFromPartialDataResponse(const std::string &response) { + PartialErrorData data; + auto parse_error = GetPartialDataResponseFromJson(response, &data); + if (parse_error) { + return ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response - " + response); + } + auto err = ConsumerErrorTemplates::kPartialData.Generate(); + PartialErrorData* error_data = new PartialErrorData{data}; + err->SetCustomData(std::unique_ptr<CustomErrorData>{error_data}); + return err; +} + +Error ConsumerErrorFromNoDataResponse(const std::string &response) { if (response.find("get_record_by_id") != std::string::npos) { ConsumerErrorData data; auto parse_error = GetNoDataResponseFromJson(response, &data); @@ -44,41 +69,35 @@ Error 
ConsumerErrorFromNoDataResponse(const std::string& response) { err = ConsumerErrorTemplates::kNoData.Generate(); } ConsumerErrorData* error_data = new ConsumerErrorData{data}; - err->SetCustomData(std::unique_ptr<CustomErrorData> {error_data}); + err->SetCustomData(std::unique_ptr<CustomErrorData>{error_data}); return err; } return ConsumerErrorTemplates::kNoData.Generate(); } -Error ConsumerErrorFromHttpCode(const RequestOutput* response, const HttpCode& code) { +Error ConsumerErrorFromHttpCode(const RequestOutput* response, const HttpCode &code) { switch (code) { - case HttpCode::OK: - return nullptr; - case HttpCode::BadRequest: - return ConsumerErrorTemplates::kWrongInput.Generate(response->to_string()); - case HttpCode::Unauthorized: - return ConsumerErrorTemplates::kWrongInput.Generate(response->to_string()); - case HttpCode::InternalServerError: - return ConsumerErrorTemplates::kInterruptedTransaction.Generate(response->to_string()); - case HttpCode::NotFound: - return ConsumerErrorTemplates::kUnavailableService.Generate(response->to_string()); - case HttpCode::Conflict: - return ConsumerErrorFromNoDataResponse(response->to_string()); - default: - return ConsumerErrorTemplates::kInterruptedTransaction.Generate(response->to_string()); - } -} -Error ConsumerErrorFromServerError(const Error& server_err) { + case HttpCode::OK:return nullptr; + case HttpCode::PartialContent:return ConsumerErrorFromPartialDataResponse(response->to_string()); + case HttpCode::BadRequest:return ConsumerErrorTemplates::kWrongInput.Generate(response->to_string()); + case HttpCode::Unauthorized:return ConsumerErrorTemplates::kWrongInput.Generate(response->to_string()); + case HttpCode::InternalServerError:return ConsumerErrorTemplates::kInterruptedTransaction.Generate(response->to_string()); + case HttpCode::NotFound:return ConsumerErrorTemplates::kUnavailableService.Generate(response->to_string()); + case HttpCode::Conflict:return ConsumerErrorFromNoDataResponse(response->to_string()); + default:return ConsumerErrorTemplates::kInterruptedTransaction.Generate(response->to_string()); + } +} +Error ConsumerErrorFromServerError(const Error &server_err) { if (server_err == HttpErrorTemplates::kTransferError) { return ConsumerErrorTemplates::kInterruptedTransaction.Generate( - "error processing request: " + server_err->Explain()); + "error processing request: " + server_err->Explain()); } else { return ConsumerErrorTemplates::kUnavailableService.Generate( - "error processing request: " + server_err->Explain()); + "error processing request: " + server_err->Explain()); } } -Error ProcessRequestResponce(const Error& server_err, const RequestOutput* response, const HttpCode& code) { +Error ProcessRequestResponce(const Error &server_err, const RequestOutput* response, const HttpCode &code) { if (server_err != nullptr) { return ConsumerErrorFromServerError(server_err); } @@ -113,41 +132,39 @@ NetworkConnectionType ServerDataBroker::CurrentConnectionType() const { return current_connection_type_; } - std::string ServerDataBroker::RequestWithToken(std::string uri) { return std::move(uri) + "?token=" + source_credentials_.user_token; } -Error ServerDataBroker::ProcessPostRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code) { +Error ServerDataBroker::ProcessPostRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code) { Error err; switch (request.output_mode) { - case OutputDataMode::string: - response->string_output = - httpclient__->Post(RequestWithToken(request.host + 
request.api) + request.extra_params, - request.cookie, - request.body, - code, - &err); - break; - case OutputDataMode::array: - err = - httpclient__->Post(RequestWithToken(request.host + request.api) + request.extra_params, request.cookie, - request.body, &response->data_output, response->data_output_size, code); - break; - default: - break; + case OutputDataMode::string: + response->string_output = + httpclient__->Post(RequestWithToken(request.host + request.api) + request.extra_params, + request.cookie, + request.body, + code, + &err); + break; + case OutputDataMode::array: + err = + httpclient__->Post(RequestWithToken(request.host + request.api) + request.extra_params, request.cookie, + request.body, &response->data_output, response->data_output_size, code); + break; + default:break; } return err; } -Error ServerDataBroker::ProcessGetRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code) { +Error ServerDataBroker::ProcessGetRequest(const RequestInfo &request, RequestOutput* response, HttpCode* code) { Error err; response->string_output = httpclient__->Get(RequestWithToken(request.host + request.api) + request.extra_params, code, &err); return err; } -Error ServerDataBroker::ProcessRequest(RequestOutput* response, const RequestInfo& request, std::string* service_uri) { +Error ServerDataBroker::ProcessRequest(RequestOutput* response, const RequestInfo &request, std::string* service_uri) { Error err; HttpCode code; if (request.post) { @@ -161,7 +178,7 @@ Error ServerDataBroker::ProcessRequest(RequestOutput* response, const RequestInf return ProcessRequestResponce(err, response, code); } -Error ServerDataBroker::DiscoverService(const std::string& service_name, std::string* uri_to_set) { +Error ServerDataBroker::DiscoverService(const std::string &service_name, std::string* uri_to_set) { if (!uri_to_set->empty()) { return nullptr; } @@ -175,13 +192,30 @@ Error ServerDataBroker::DiscoverService(const std::string& service_name, std::st if (err != nullptr || uri_to_set->empty()) { uri_to_set->clear(); return ConsumerErrorTemplates::kUnavailableService.Generate(" on " + endpoint_ - + (err != nullptr ? ": " + err->Explain() - : "")); + + (err != nullptr ? 
": " + err->Explain() + : "")); } return nullptr; } -bool ServerDataBroker::SwitchToGetByIdIfNoData(Error* err, const std::string& response, std::string* redirect_uri) { +bool ServerDataBroker::SwitchToGetByIdIfPartialData(Error* err, + const std::string &response, + std::string* group_id, + std::string* redirect_uri) { + if (*err == ConsumerErrorTemplates::kPartialData) { + auto error_data = static_cast<const PartialErrorData*>((*err)->GetCustomData()); + if (error_data == nullptr) { + *err = ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response - " + response); + return false; + } + *redirect_uri = std::to_string(error_data->id); + *group_id = "0"; + return true; + } + return false; +} + +bool ServerDataBroker::SwitchToGetByIdIfNoData(Error* err, const std::string &response,std::string* group_id, std::string* redirect_uri) { if (*err == ConsumerErrorTemplates::kNoData) { auto error_data = static_cast<const ConsumerErrorData*>((*err)->GetCustomData()); if (error_data == nullptr) { @@ -189,38 +223,40 @@ bool ServerDataBroker::SwitchToGetByIdIfNoData(Error* err, const std::string& re return false; } *redirect_uri = std::to_string(error_data->id); + *group_id = "0"; return true; } return false; } -RequestInfo ServerDataBroker::PrepareRequestInfo(std::string api_url, bool dataset) { +RequestInfo ServerDataBroker::PrepareRequestInfo(std::string api_url, bool dataset, uint64_t min_size) { RequestInfo ri; ri.host = current_broker_uri_; ri.api = std::move(api_url); if (dataset) { ri.extra_params = "&dataset=true"; + ri.extra_params += "&minsize="+std::to_string(min_size); } return ri; } Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string group_id, std::string substream, GetImageServerOperation op, - bool dataset) { + bool dataset, uint64_t min_size) { std::string request_suffix = OpToUriCmd(op); + std::string request_group = OpToUriCmd(op); std::string request_api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream - + "/" + std::move(substream) + - +"/" + std::move(group_id) + "/"; + + "/" + std::move(substream); uint64_t elapsed_ms = 0; Error no_data_error; while (true) { auto start = system_clock::now(); auto err = DiscoverService(kBrokerServiceName, ¤t_broker_uri_); if (err == nullptr) { - auto ri = PrepareRequestInfo(request_api + request_suffix, dataset); + auto ri = PrepareRequestInfo(request_api + "/" + group_id + "/" + request_suffix, dataset, min_size); if (request_suffix == "next" && resend_) { ri.extra_params = ri.extra_params + "&resend_nacks=true" + "&delay_sec=" + - std::to_string(delay_sec_) + "&resend_attempts=" + std::to_string(resend_attempts_); + std::to_string(delay_sec_) + "&resend_attempts=" + std::to_string(resend_attempts_); } RequestOutput output; err = ProcessRequest(&output, ri, ¤t_broker_uri_); @@ -235,7 +271,8 @@ Error ServerDataBroker::GetRecordFromServer(std::string* response, std::string g } if (request_suffix == "next") { - auto save_error = SwitchToGetByIdIfNoData(&err, *response, &request_suffix); + auto save_error = SwitchToGetByIdIfNoData(&err, *response, &group_id, &request_suffix) + || SwitchToGetByIdIfPartialData(&err, *response, &group_id, &request_suffix); if (err == ConsumerErrorTemplates::kInterruptedTransaction) { return err; } @@ -265,14 +302,14 @@ Error ServerDataBroker::GetNext(FileInfo* info, std::string group_id, std::strin data); } -Error ServerDataBroker::GetLast(FileInfo* info, std::string group_id, FileData* data) { - return GetLast(info, std::move(group_id), 
kDefaultSubstream, data); +Error ServerDataBroker::GetLast(FileInfo* info, FileData* data) { + return GetLast(info, kDefaultSubstream, data); } -Error ServerDataBroker::GetLast(FileInfo* info, std::string group_id, std::string substream, FileData* data) { +Error ServerDataBroker::GetLast(FileInfo* info, std::string substream, FileData* data) { return GetImageFromServer(GetImageServerOperation::GetLast, 0, - std::move(group_id), + "0", std::move(substream), info, data); @@ -280,12 +317,9 @@ Error ServerDataBroker::GetLast(FileInfo* info, std::string group_id, std::strin std::string ServerDataBroker::OpToUriCmd(GetImageServerOperation op) { switch (op) { - case GetImageServerOperation::GetNext: - return "next"; - case GetImageServerOperation::GetLast: - return "last"; - default: - return "last"; + case GetImageServerOperation::GetNext:return "next"; + case GetImageServerOperation::GetLast:return "last"; + default:return "last"; } } @@ -406,7 +440,7 @@ std::string ServerDataBroker::GenerateNewGroupId(Error* err) { return BrokerRequestWithTimeout(ri, err); } -Error ServerDataBroker::ServiceRequestWithTimeout(const std::string& service_name, +Error ServerDataBroker::ServiceRequestWithTimeout(const std::string &service_name, std::string* service_uri, RequestInfo request, RequestOutput* response) { @@ -468,9 +502,6 @@ RequestInfo ServerDataBroker::CreateFileTransferRequest(const FileInfo* info) co std::string ServerDataBroker::BrokerRequestWithTimeout(RequestInfo request, Error* err) { RequestOutput response; *err = ServiceRequestWithTimeout(kBrokerServiceName, ¤t_broker_uri_, request, &response); - if (*err) { - return ""; - } return std::move(response.string_output); } @@ -489,7 +520,7 @@ Error ServerDataBroker::ResetLastReadMarker(std::string group_id, std::string su Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id, std::string substream) { RequestInfo ri; ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + "/" - + std::move(substream) + "/" + std::move(group_id) + "/resetcounter"; + + std::move(substream) + "/" + std::move(group_id) + "/resetcounter"; ri.extra_params = "&value=" + std::to_string(value); ri.post = true; @@ -501,7 +532,7 @@ Error ServerDataBroker::SetLastReadMarker(uint64_t value, std::string group_id, uint64_t ServerDataBroker::GetCurrentSize(std::string substream, Error* err) { RequestInfo ri; ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + - +"/" + std::move(substream) + "/size"; + +"/" + std::move(substream) + "/size"; auto responce = BrokerRequestWithTimeout(ri, err); if (*err) { return 0; @@ -518,32 +549,29 @@ uint64_t ServerDataBroker::GetCurrentSize(std::string substream, Error* err) { uint64_t ServerDataBroker::GetCurrentSize(Error* err) { return GetCurrentSize(kDefaultSubstream, err); } -Error ServerDataBroker::GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) { +Error ServerDataBroker::GetById(uint64_t id, FileInfo* info, FileData* data) { if (id == 0) { return ConsumerErrorTemplates::kWrongInput.Generate("id should be positive"); } - return GetById(id, info, std::move(group_id), kDefaultSubstream, data); + return GetById(id, info, kDefaultSubstream, data); } -Error ServerDataBroker::GetById(uint64_t id, - FileInfo* info, - std::string group_id, - std::string substream, - FileData* data) { - return GetImageFromServer(GetImageServerOperation::GetID, id, group_id, substream, info, data); +Error ServerDataBroker::GetById(uint64_t 
id, FileInfo* info, std::string substream, FileData* data) { + return GetImageFromServer(GetImageServerOperation::GetID, id, "0", substream, info, data); } Error ServerDataBroker::GetRecordFromServerById(uint64_t id, std::string* response, std::string group_id, std::string substream, - bool dataset) { + bool dataset, uint64_t min_size) { RequestInfo ri; ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + - +"/" + std::move(substream) + - "/" + std::move( - group_id) + "/" + std::to_string(id); + +"/" + std::move(substream) + + "/" + std::move( + group_id) + "/" + std::to_string(id); if (dataset) { ri.extra_params += "&dataset=true"; + ri.extra_params += "&minsize="+std::to_string(min_size); } Error err; @@ -558,13 +586,12 @@ std::string ServerDataBroker::GetBeamtimeMeta(Error* err) { return BrokerRequestWithTimeout(ri, err); } -DataSet ServerDataBroker::DecodeDatasetFromResponse(std::string response, Error* err) { +DataSet DecodeDatasetFromResponse(std::string response, Error* err) { DataSet res; if (!res.SetFromJson(std::move(response))) { *err = ConsumerErrorTemplates::kInterruptedTransaction.Generate("malformed response:" + response); - return {0, FileInfos{}}; + return {0,0,FileInfos{}}; } else { - *err = nullptr; return res; } } @@ -572,7 +599,7 @@ DataSet ServerDataBroker::DecodeDatasetFromResponse(std::string response, Error* FileInfos ServerDataBroker::QueryImages(std::string query, std::string substream, Error* err) { RequestInfo ri; ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + - "/" + std::move(substream) + "/0/queryimages"; + "/" + std::move(substream) + "/0/queryimages"; ri.post = true; ri.body = std::move(query); @@ -581,7 +608,7 @@ FileInfos ServerDataBroker::QueryImages(std::string query, std::string substream return FileInfos{}; } - auto dataset = DecodeDatasetFromResponse("{\"_id\":0, \"images\":" + response + "}", err); + auto dataset = DecodeDatasetFromResponse("{\"_id\":0,\"size\":0, \"images\":" + response + "}", err); return dataset.content; } @@ -589,45 +616,46 @@ FileInfos ServerDataBroker::QueryImages(std::string query, Error* err) { return QueryImages(std::move(query), kDefaultSubstream, err); } -DataSet ServerDataBroker::GetNextDataset(std::string group_id, Error* err) { - return GetNextDataset(std::move(group_id), kDefaultSubstream, err); +DataSet ServerDataBroker::GetNextDataset(std::string group_id, uint64_t min_size, Error* err) { + return GetNextDataset(std::move(group_id), kDefaultSubstream, min_size, err); } -DataSet ServerDataBroker::GetNextDataset(std::string group_id, std::string substream, Error* err) { - return GetDatasetFromServer(GetImageServerOperation::GetNext, 0, std::move(group_id), std::move(substream), err); +DataSet ServerDataBroker::GetNextDataset(std::string group_id, std::string substream, uint64_t min_size, Error* err) { + return GetDatasetFromServer(GetImageServerOperation::GetNext, 0, std::move(group_id), std::move(substream),min_size, err); } -DataSet ServerDataBroker::GetLastDataset(std::string group_id, std::string substream, Error* err) { - return GetDatasetFromServer(GetImageServerOperation::GetLast, 0, std::move(group_id), std::move(substream), err); +DataSet ServerDataBroker::GetLastDataset(std::string substream, uint64_t min_size, Error* err) { + return GetDatasetFromServer(GetImageServerOperation::GetLast, 0, "0", std::move(substream),min_size, err); } -DataSet ServerDataBroker::GetLastDataset(std::string group_id, Error* err) { - return 
GetLastDataset(std::move(group_id), kDefaultSubstream, err); +DataSet ServerDataBroker::GetLastDataset(uint64_t min_size, Error* err) { + return GetLastDataset(kDefaultSubstream, min_size, err); } DataSet ServerDataBroker::GetDatasetFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream, + uint64_t min_size, Error* err) { FileInfos infos; std::string response; if (op == GetImageServerOperation::GetID) { - *err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(substream), true); + *err = GetRecordFromServerById(id, &response, std::move(group_id), std::move(substream), true, min_size); } else { - *err = GetRecordFromServer(&response, std::move(group_id), std::move(substream), op, true); + *err = GetRecordFromServer(&response, std::move(group_id), std::move(substream), op, true, min_size); } - if (*err != nullptr) { - return {0, FileInfos{}}; + if (*err != nullptr && *err!=ConsumerErrorTemplates::kPartialData) { + return {0, 0,FileInfos{}}; } return DecodeDatasetFromResponse(response, err); } -DataSet ServerDataBroker::GetDatasetById(uint64_t id, std::string group_id, Error* err) { - return GetDatasetById(id, std::move(group_id), kDefaultSubstream, err); +DataSet ServerDataBroker::GetDatasetById(uint64_t id, uint64_t min_size, Error* err) { + return GetDatasetById(id, kDefaultSubstream, min_size, err); } -DataSet ServerDataBroker::GetDatasetById(uint64_t id, std::string group_id, std::string substream, Error* err) { - return GetDatasetFromServer(GetImageServerOperation::GetID, id, std::move(group_id), std::move(substream), err); +DataSet ServerDataBroker::GetDatasetById(uint64_t id, std::string substream, uint64_t min_size, Error* err) { + return GetDatasetFromServer(GetImageServerOperation::GetID, id, "0", std::move(substream), min_size, err); } StreamInfos ParseSubstreamsFromResponse(std::string response, Error* err) { @@ -637,14 +665,14 @@ StreamInfos ParseSubstreamsFromResponse(std::string response, Error* err) { Error parse_err; *err = parser.GetArrayRawStrings("substreams", &substreams_endcoded); if (*err) { - return StreamInfos {}; + return StreamInfos{}; } for (auto substream_encoded : substreams_endcoded) { StreamInfo si; - auto ok = si.SetFromJson(substream_encoded,false); + auto ok = si.SetFromJson(substream_encoded, false); if (!ok) { - *err = TextError("cannot parse "+substream_encoded); - return StreamInfos {}; + *err = TextError("cannot parse " + substream_encoded); + return StreamInfos{}; } substreams.emplace_back(si); } @@ -657,12 +685,12 @@ StreamInfos ServerDataBroker::GetSubstreamList(std::string from, Error* err) { ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + "/0/substreams"; ri.post = false; if (!from.empty()) { - ri.extra_params="&from=" + from; + ri.extra_params = "&from=" + from; } auto response = BrokerRequestWithTimeout(ri, err); if (*err) { - return StreamInfos {}; + return StreamInfos{}; } return ParseSubstreamsFromResponse(std::move(response), err); @@ -691,13 +719,13 @@ RequestInfo ServerDataBroker::CreateFolderTokenRequest() const { ri.post = true; ri.body = "{\"Folder\":\"" + source_path_ + "\",\"BeamtimeId\":\"" + source_credentials_.beamtime_id + "\",\"Token\":\"" - + - source_credentials_.user_token + "\"}"; + + + source_credentials_.user_token + "\"}"; return ri; } Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData* data, - bool retry_with_new_token) { + bool retry_with_new_token) { auto err = 
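// Illustrative sketch, not part of the patch: a hedged usage example for the new
// min_size parameter threaded through the dataset getters above. As
// GetDatasetFromServer shows, kPartialData is not fatal - the partial content is still
// decoded and returned; with a nonzero min_size the "minsize" query parameter
// presumably lets the broker answer without error once that many images are present.
//
//   asapo::Error err;
//   auto set = broker->GetNextDataset(group_id, 0, &err);   // min_size 0: incomplete -> kPartialData
//   if (err == asapo::ConsumerErrorTemplates::kPartialData) {
//       auto info = static_cast<const asapo::PartialErrorData*>(err->GetCustomData());
//       // set.content holds the images delivered so far; info->expected_size the full size.
//   }
//   auto set2 = broker->GetLastDataset(2, &err);            // accept any dataset with >= 2 images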
UpdateFolderTokenIfNeeded(retry_with_new_token); if (err) { return err; @@ -706,7 +734,7 @@ Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData* if (info->size == 0) { err = FtsSizeRequestWithTimeout(info); if (err == ConsumerErrorTemplates::kWrongInput - && !retry_with_new_token) { // token expired? Refresh token and try again. + && !retry_with_new_token) { // token expired? Refresh token and try again. return GetDataFromFileTransferService(info, data, true); } if (err) { @@ -716,7 +744,7 @@ Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData* err = FtsRequestWithTimeout(info, data); if (err == ConsumerErrorTemplates::kWrongInput - && !retry_with_new_token) { // token expired? Refresh token and try again. + && !retry_with_new_token) { // token expired? Refresh token and try again. return GetDataFromFileTransferService(info, data, true); } return err; @@ -725,8 +753,8 @@ Error ServerDataBroker::GetDataFromFileTransferService(FileInfo* info, FileData* Error ServerDataBroker::Acknowledge(std::string group_id, uint64_t id, std::string substream) { RequestInfo ri; ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + - +"/" + std::move(substream) + - "/" + std::move(group_id) + "/" + std::to_string(id); + +"/" + std::move(substream) + + "/" + std::move(group_id) + "/" + std::to_string(id); ri.post = true; ri.body = "{\"Op\":\"ackimage\"}"; @@ -736,14 +764,14 @@ Error ServerDataBroker::Acknowledge(std::string group_id, uint64_t id, std::stri } IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id, - std::string substream, - uint64_t from_id, - uint64_t to_id, - Error* error) { + std::string substream, + uint64_t from_id, + uint64_t to_id, + Error* error) { RequestInfo ri; ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + - +"/" + std::move(substream) + - "/" + std::move(group_id) + "/nacks"; + +"/" + std::move(substream) + + "/" + std::move(group_id) + "/nacks"; ri.extra_params = "&from=" + std::to_string(from_id) + "&to=" + std::to_string(to_id); auto json_string = BrokerRequestWithTimeout(ri, error); @@ -761,17 +789,17 @@ IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id, } IdList ServerDataBroker::GetUnacknowledgedTupleIds(std::string group_id, - uint64_t from_id, - uint64_t to_id, - Error* error) { + uint64_t from_id, + uint64_t to_id, + Error* error) { return GetUnacknowledgedTupleIds(std::move(group_id), kDefaultSubstream, from_id, to_id, error); } uint64_t ServerDataBroker::GetLastAcknowledgedTulpeId(std::string group_id, std::string substream, Error* error) { RequestInfo ri; ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + - +"/" + std::move(substream) + - "/" + std::move(group_id) + "/lastack"; + +"/" + std::move(substream) + + "/" + std::move(group_id) + "/lastack"; auto json_string = BrokerRequestWithTimeout(ri, error); if (*error) { @@ -806,8 +834,8 @@ Error ServerDataBroker::NegativeAcknowledge(std::string group_id, std::string substream) { RequestInfo ri; ri.api = "/database/" + source_credentials_.beamtime_id + "/" + source_credentials_.stream + - +"/" + std::move(substream) + - "/" + std::move(group_id) + "/" + std::to_string(id); + +"/" + std::move(substream) + + "/" + std::move(group_id) + "/" + std::to_string(id); ri.post = true; ri.body = R"({"Op":"negackimage","Params":{"DelaySec":)" + std::to_string(delay_sec) + "}}"; diff --git 
a/consumer/api/cpp/src/server_data_broker.h b/consumer/api/cpp/src/server_data_broker.h index d58c69df7e48e194bccf14dcbdf10208d2470949..53a938813ca74ecb9c3ec126f95bab20627389f1 100644 --- a/consumer/api/cpp/src/server_data_broker.h +++ b/consumer/api/cpp/src/server_data_broker.h @@ -47,7 +47,8 @@ struct RequestOutput { Error ProcessRequestResponce(const Error& server_err, const RequestOutput* response, const HttpCode& code); Error ConsumerErrorFromNoDataResponse(const std::string& response); - +Error ConsumerErrorFromPartialDataResponse(const std::string& response); +DataSet DecodeDatasetFromResponse(std::string response, Error* err); class ServerDataBroker final : public asapo::DataBroker { public: @@ -77,8 +78,8 @@ class ServerDataBroker final : public asapo::DataBroker { Error GetNext(FileInfo* info, std::string group_id, FileData* data) override; Error GetNext(FileInfo* info, std::string group_id, std::string substream, FileData* data) override; - Error GetLast(FileInfo* info, std::string group_id, FileData* data) override; - Error GetLast(FileInfo* info, std::string group_id, std::string substream, FileData* data) override; + Error GetLast(FileInfo* info, FileData* data) override; + Error GetLast(FileInfo* info, std::string substream, FileData* data) override; std::string GenerateNewGroupId(Error* err) override; std::string GetBeamtimeMeta(Error* err) override; @@ -86,8 +87,8 @@ class ServerDataBroker final : public asapo::DataBroker { uint64_t GetCurrentSize(Error* err) override; uint64_t GetCurrentSize(std::string substream, Error* err) override; - Error GetById(uint64_t id, FileInfo* info, std::string group_id, FileData* data) override; - Error GetById(uint64_t id, FileInfo* info, std::string group_id, std::string substream, FileData* data) override; + Error GetById(uint64_t id, FileInfo* info, FileData* data) override; + Error GetById(uint64_t id, FileInfo* info, std::string substream, FileData* data) override; void SetTimeout(uint64_t timeout_ms) override; @@ -98,14 +99,14 @@ class ServerDataBroker final : public asapo::DataBroker { FileInfos QueryImages(std::string query, Error* err) override; FileInfos QueryImages(std::string query, std::string substream, Error* err) override; - DataSet GetNextDataset(std::string group_id, Error* err) override; - DataSet GetNextDataset(std::string group_id, std::string substream, Error* err) override; + DataSet GetNextDataset(std::string group_id, uint64_t min_size, Error* err) override; + DataSet GetNextDataset(std::string group_id, std::string substream, uint64_t min_size, Error* err) override; - DataSet GetLastDataset(std::string group_id, Error* err) override; - DataSet GetLastDataset(std::string group_id, std::string substream, Error* err) override; + DataSet GetLastDataset(uint64_t min_size, Error* err) override; + DataSet GetLastDataset(std::string substream, uint64_t min_size, Error* err) override; - DataSet GetDatasetById(uint64_t id, std::string group_id, Error* err) override; - DataSet GetDatasetById(uint64_t id, std::string group_id, std::string substream, Error* err) override; + DataSet GetDatasetById(uint64_t id, uint64_t min_size, Error* err) override; + DataSet GetDatasetById(uint64_t id, std::string substream, uint64_t min_size, Error* err) override; Error RetrieveData(FileInfo* info, FileData* data) override; @@ -124,17 +125,18 @@ class ServerDataBroker final : public asapo::DataBroker { static const std::string kFileTransferServiceName; std::string RequestWithToken(std::string uri); Error GetRecordFromServer(std::string* 
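// Illustrative sketch, not part of the patch: the declarations above spell out this
// release's breaking change on the consumer side - group_id disappears from
// GetLast/GetById (the dataset getters additionally gain min_size). A before/after
// sketch, with call sites hypothetical:
//
//   // before: broker->GetLast(&info, group_id, &data);
//   //         broker->GetById(id, &info, group_id, &data);
//   broker->GetLast(&info, &data);        // "last" needs no per-group read pointer
//   broker->GetById(id, &info, &data);    // random access is group-independent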
info, std::string group_id, std::string substream, GetImageServerOperation op, - bool dataset = false); + bool dataset = false, uint64_t min_size = 0); Error GetRecordFromServerById(uint64_t id, std::string* info, std::string group_id, std::string substream, - bool dataset = false); + bool dataset = false, uint64_t min_size = 0); Error GetDataIfNeeded(FileInfo* info, FileData* data); Error DiscoverService(const std::string& service_name, std::string* uri_to_set); - bool SwitchToGetByIdIfNoData(Error* err, const std::string& response, std::string* redirect_uri); + bool SwitchToGetByIdIfNoData(Error* err, const std::string& response, std::string* group_id,std::string* redirect_uri); + bool SwitchToGetByIdIfPartialData(Error* err, const std::string& response, std::string* group_id,std::string* redirect_uri); Error ProcessRequest(RequestOutput* response, const RequestInfo& request, std::string* service_uri); Error GetImageFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream, FileInfo* info, FileData* data); DataSet GetDatasetFromServer(GetImageServerOperation op, uint64_t id, std::string group_id, std::string substream, - Error* err); + uint64_t min_size, Error* err); bool DataCanBeInBuffer(const FileInfo* info); Error TryGetDataFromBuffer(const FileInfo* info, FileData* data); Error CreateNetClientAndTryToGetFile(const FileInfo* info, FileData* data); @@ -146,8 +148,7 @@ class ServerDataBroker final : public asapo::DataBroker { Error ProcessPostRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code); Error ProcessGetRequest(const RequestInfo& request, RequestOutput* response, HttpCode* code); - DataSet DecodeDatasetFromResponse(std::string response, Error* err); - RequestInfo PrepareRequestInfo(std::string api_url, bool dataset); + RequestInfo PrepareRequestInfo(std::string api_url, bool dataset, uint64_t min_size); std::string OpToUriCmd(GetImageServerOperation op); Error UpdateFolderTokenIfNeeded(bool ignore_existing); std::string endpoint_; diff --git a/consumer/api/cpp/unittests/test_server_broker.cpp b/consumer/api/cpp/unittests/test_server_broker.cpp index e1ddc85aa18ffb6a151384e04b7c089647115623..d7b1f95368ee612af813b987badfaf36b42c2bfb 100644 --- a/consumer/api/cpp/unittests/test_server_broker.cpp +++ b/consumer/api/cpp/unittests/test_server_broker.cpp @@ -45,9 +45,10 @@ namespace { TEST(FolderDataBroker, Constructor) { auto data_broker = - std::unique_ptr<ServerDataBroker> {new ServerDataBroker("test", "path", false, - asapo::SourceCredentials{asapo::SourceType::kProcessed,"beamtime_id", "", "", "token"}) - }; + std::unique_ptr<ServerDataBroker>{new ServerDataBroker("test", "path", false, + asapo::SourceCredentials{asapo::SourceType::kProcessed, + "beamtime_id", "", "", "token"}) + }; ASSERT_THAT(dynamic_cast<asapo::SystemIO*>(data_broker->io__.get()), Ne(nullptr)); ASSERT_THAT(dynamic_cast<asapo::CurlHttpClient*>(data_broker->httpclient__.get()), Ne(nullptr)); ASSERT_THAT(data_broker->net_client__.get(), Eq(nullptr)); @@ -56,118 +57,125 @@ TEST(FolderDataBroker, Constructor) { const uint8_t expected_value = 1; class ServerDataBrokerTests : public Test { - public: - std::unique_ptr<ServerDataBroker> data_broker, fts_data_broker; - NiceMock<MockIO> mock_io; - NiceMock<MockHttpClient> mock_http_client; - NiceMock<MockNetClient> mock_netclient; - FileInfo info; - std::string expected_server_uri = "test:8400"; - std::string expected_broker_uri = "asapo-broker:5005"; - std::string expected_fts_uri = 
"asapo-file-transfer:5008"; - std::string expected_token = "token"; - std::string expected_path = "/tmp/beamline/beamtime"; - std::string expected_filename = "filename"; - std::string expected_full_path = std::string("/tmp/beamline/beamtime") + asapo::kPathSeparator + expected_filename; - std::string expected_group_id = "groupid"; - std::string expected_stream = "stream"; - std::string expected_substream = "substream"; - std::string expected_metadata = "{\"meta\":1}"; - std::string expected_query_string = "bla"; - std::string expected_folder_token = "folder_token"; - std::string expected_beamtime_id = "beamtime_id"; - uint64_t expected_image_size = 100; - uint64_t expected_dataset_id = 1; - static const uint64_t expected_buf_id = 123; - std::string expected_next_substream = "nextsubstream"; - std::string expected_fts_query_string = "{\"Folder\":\"" + expected_path + "\",\"FileName\":\"" + expected_filename + - "\"}"; - std::string expected_cookie = "Authorization=Bearer " + expected_folder_token; - - void AssertSingleFileTransfer(); - void SetUp() override { - data_broker = std::unique_ptr<ServerDataBroker> { - new ServerDataBroker(expected_server_uri, expected_path, true, asapo::SourceCredentials{asapo::SourceType::kProcessed,expected_beamtime_id, "", expected_stream, expected_token}) - }; - fts_data_broker = std::unique_ptr<ServerDataBroker> { - new ServerDataBroker(expected_server_uri, expected_path, false, asapo::SourceCredentials{asapo::SourceType::kProcessed,expected_beamtime_id, "", expected_stream, expected_token}) - }; - data_broker->io__ = std::unique_ptr<IO> {&mock_io}; - data_broker->httpclient__ = std::unique_ptr<asapo::HttpClient> {&mock_http_client}; - data_broker->net_client__ = std::unique_ptr<asapo::NetClient> {&mock_netclient}; - fts_data_broker->io__ = std::unique_ptr<IO> {&mock_io}; - fts_data_broker->httpclient__ = std::unique_ptr<asapo::HttpClient> {&mock_http_client}; - fts_data_broker->net_client__ = std::unique_ptr<asapo::NetClient> {&mock_netclient}; - - } - void TearDown() override { - data_broker->io__.release(); - data_broker->httpclient__.release(); - data_broker->net_client__.release(); - fts_data_broker->io__.release(); - fts_data_broker->httpclient__.release(); - fts_data_broker->net_client__.release(); - - } - void MockGet(const std::string& response) { - EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_broker_uri), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(response) - )); - } - - void MockGetError() { - EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_broker_uri), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::NotFound), - SetArgPointee<2>(asapo::IOErrorTemplates::kUnknownIOError.Generate().release()), - Return("") - )); - } - void MockGetServiceUri(std::string service, std::string result) { - EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/" + service), _, - _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(result))); - } - - void MockBeforeFTS(FileData* data); - - void MockGetFTSUri() { - MockGetServiceUri("asapo-file-transfer", expected_fts_uri); - } - - void ExpectFolderToken(); - void ExpectFileTransfer(const asapo::ConsumerErrorTemplate* p_err_template); - void ExpectRepeatedFileTransfer(); - void ExpectIdList(bool error); - void ExpectLastAckId(bool empty_response); - - void MockGetBrokerUri() { - MockGetServiceUri("asapo-broker", expected_broker_uri); - } - void MockReadDataFromFile(int times = 1) { 
- if (times == 0) { - EXPECT_CALL(mock_io, GetDataFromFile_t(_, _, _)).Times(0); - return; - } - - EXPECT_CALL(mock_io, GetDataFromFile_t(expected_full_path, testing::Pointee(100), _)).Times(times). - WillRepeatedly(DoAll(SetArgPointee<2>(new asapo::SimpleError{"s"}), testing::Return(nullptr))); - } - FileInfo CreateFI(uint64_t buf_id = expected_buf_id) { - FileInfo fi; - fi.size = expected_image_size; - fi.id = 1; - fi.buf_id = buf_id; - fi.name = expected_filename; - fi.timestamp = std::chrono::system_clock::now(); - return fi; - } + public: + std::unique_ptr<ServerDataBroker> data_broker, fts_data_broker; + NiceMock<MockIO> mock_io; + NiceMock<MockHttpClient> mock_http_client; + NiceMock<MockNetClient> mock_netclient; + FileInfo info; + std::string expected_server_uri = "test:8400"; + std::string expected_broker_uri = "asapo-broker:5005"; + std::string expected_fts_uri = "asapo-file-transfer:5008"; + std::string expected_token = "token"; + std::string expected_path = "/tmp/beamline/beamtime"; + std::string expected_filename = "filename"; + std::string expected_full_path = std::string("/tmp/beamline/beamtime") + asapo::kPathSeparator + expected_filename; + std::string expected_group_id = "groupid"; + std::string expected_stream = "stream"; + std::string expected_substream = "substream"; + std::string expected_metadata = "{\"meta\":1}"; + std::string expected_query_string = "bla"; + std::string expected_folder_token = "folder_token"; + std::string expected_beamtime_id = "beamtime_id"; + uint64_t expected_image_size = 100; + uint64_t expected_dataset_id = 1; + static const uint64_t expected_buf_id = 123; + std::string expected_next_substream = "nextsubstream"; + std::string expected_fts_query_string = "{\"Folder\":\"" + expected_path + "\",\"FileName\":\"" + expected_filename + + "\"}"; + std::string expected_cookie = "Authorization=Bearer " + expected_folder_token; + + void AssertSingleFileTransfer(); + void SetUp() override { + data_broker = std::unique_ptr<ServerDataBroker>{ + new ServerDataBroker(expected_server_uri, + expected_path, + true, + asapo::SourceCredentials{asapo::SourceType::kProcessed, expected_beamtime_id, "", + expected_stream, expected_token}) + }; + fts_data_broker = std::unique_ptr<ServerDataBroker>{ + new ServerDataBroker(expected_server_uri, + expected_path, + false, + asapo::SourceCredentials{asapo::SourceType::kProcessed, expected_beamtime_id, "", + expected_stream, expected_token}) + }; + data_broker->io__ = std::unique_ptr<IO>{&mock_io}; + data_broker->httpclient__ = std::unique_ptr<asapo::HttpClient>{&mock_http_client}; + data_broker->net_client__ = std::unique_ptr<asapo::NetClient>{&mock_netclient}; + fts_data_broker->io__ = std::unique_ptr<IO>{&mock_io}; + fts_data_broker->httpclient__ = std::unique_ptr<asapo::HttpClient>{&mock_http_client}; + fts_data_broker->net_client__ = std::unique_ptr<asapo::NetClient>{&mock_netclient}; + + } + void TearDown() override { + data_broker->io__.release(); + data_broker->httpclient__.release(); + data_broker->net_client__.release(); + fts_data_broker->io__.release(); + fts_data_broker->httpclient__.release(); + fts_data_broker->net_client__.release(); + + } + void MockGet(const std::string &response, asapo::HttpCode return_code = HttpCode::OK) { + EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_broker_uri), _, _)).WillOnce(DoAll( + SetArgPointee<1>(return_code), + SetArgPointee<2>(nullptr), + Return(response) + )); + } + + void MockGetError() { + EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_broker_uri), _, 
_)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::NotFound), + SetArgPointee<2>(asapo::IOErrorTemplates::kUnknownIOError.Generate().release()), + Return("") + )); + } + void MockGetServiceUri(std::string service, std::string result) { + EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/" + service), _, + _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(result))); + } + + void MockBeforeFTS(FileData* data); + + void MockGetFTSUri() { + MockGetServiceUri("asapo-file-transfer", expected_fts_uri); + } + + void ExpectFolderToken(); + void ExpectFileTransfer(const asapo::ConsumerErrorTemplate* p_err_template); + void ExpectRepeatedFileTransfer(); + void ExpectIdList(bool error); + void ExpectLastAckId(bool empty_response); + + void MockGetBrokerUri() { + MockGetServiceUri("asapo-broker", expected_broker_uri); + } + void MockReadDataFromFile(int times = 1) { + if (times == 0) { + EXPECT_CALL(mock_io, GetDataFromFile_t(_, _, _)).Times(0); + return; + } + + EXPECT_CALL(mock_io, GetDataFromFile_t(expected_full_path, testing::Pointee(100), _)).Times(times). + WillRepeatedly(DoAll(SetArgPointee<2>(new asapo::SimpleError{"s"}), testing::Return(nullptr))); + } + FileInfo CreateFI(uint64_t buf_id = expected_buf_id) { + FileInfo fi; + fi.size = expected_image_size; + fi.id = 1; + fi.buf_id = buf_id; + fi.name = expected_filename; + fi.timestamp = std::chrono::system_clock::now(); + return fi; + } }; - TEST_F(ServerDataBrokerTests, GetImageReturnsErrorOnWrongInput) { auto err = data_broker->GetNext(nullptr, "", nullptr); ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kWrongInput)); @@ -177,39 +185,42 @@ TEST_F(ServerDataBrokerTests, DefaultStreamIsDetector) { data_broker->io__.release(); data_broker->httpclient__.release(); data_broker->net_client__.release(); - data_broker = std::unique_ptr<ServerDataBroker> { - new ServerDataBroker(expected_server_uri, expected_path, false, asapo::SourceCredentials{asapo::SourceType::kProcessed,"beamtime_id", "", "", expected_token}) + data_broker = std::unique_ptr<ServerDataBroker>{ + new ServerDataBroker(expected_server_uri, + expected_path, + false, + asapo::SourceCredentials{asapo::SourceType::kProcessed, "beamtime_id", "", "", + expected_token}) }; - data_broker->io__ = std::unique_ptr<IO> {&mock_io}; - data_broker->httpclient__ = std::unique_ptr<asapo::HttpClient> {&mock_http_client}; - data_broker->net_client__ = std::unique_ptr<asapo::NetClient> {&mock_netclient}; + data_broker->io__ = std::unique_ptr<IO>{&mock_io}; + data_broker->httpclient__ = std::unique_ptr<asapo::HttpClient>{&mock_http_client}; + data_broker->net_client__ = std::unique_ptr<asapo::NetClient>{&mock_netclient}; MockGetBrokerUri(); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/detector/default/" + expected_group_id - + - "/next?token=" - + expected_token, _, - _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); + EXPECT_CALL(mock_http_client, + Get_t(expected_broker_uri + "/database/beamtime_id/detector/default/" + expected_group_id + + + "/next?token=" + + expected_token, _, + _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); data_broker->GetNext(&info, expected_group_id, nullptr); } - - TEST_F(ServerDataBrokerTests, GetNextUsesCorrectUri) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" - + 
expected_group_id + "/next?token=" - + expected_token, _, + + expected_group_id + "/next?token=" + + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); data_broker->GetNext(&info, expected_group_id, nullptr); } @@ -217,35 +228,35 @@ TEST_F(ServerDataBrokerTests, GetNextUsesCorrectUriWithSubstream) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" + - expected_substream + "/" + expected_group_id + "/next?token=" - + expected_token, _, + expected_substream + "/" + expected_group_id + "/next?token=" + + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); data_broker->GetNext(&info, expected_group_id, expected_substream, nullptr); } TEST_F(ServerDataBrokerTests, GetLastUsesCorrectUri) { MockGetBrokerUri(); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + "/last?token=" - + expected_token, _, - _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); - data_broker->GetLast(&info, expected_group_id, nullptr); + EXPECT_CALL(mock_http_client, + Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/last?token=" + + expected_token, _, + _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); + data_broker->GetLast(&info, nullptr); } TEST_F(ServerDataBrokerTests, GetImageReturnsEndOfStreamFromHttpClient) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}"))); + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"\"}"))); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -261,9 +272,10 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsStreamFinishedFromHttpClient) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"" + expected_next_substream + "\"}"))); + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"" + expected_next_substream + + "\"}"))); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -279,10 +291,9 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsNoDataFromHttpClient) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_substream\":\"""\"}"))); - + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":2,\"next_substream\":\"""\"}"))); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); auto err_data = static_cast<const 
asapo::ConsumerErrorData*>(err->GetCustomData()); @@ -298,9 +309,9 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsNotAuthorized) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Unauthorized), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::Unauthorized), + SetArgPointee<2>(nullptr), + Return(""))); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -312,9 +323,9 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsWrongResponseFromHttpClient) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - Return("id"))); + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return("id"))); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -325,9 +336,9 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsWrongResponseFromHttpClient) { TEST_F(ServerDataBrokerTests, GetImageReturnsIfBrokerAddressNotFound) { EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _, _)).Times(AtLeast(2)).WillRepeatedly(DoAll( - SetArgPointee<1>(HttpCode::NotFound), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::NotFound), + SetArgPointee<2>(nullptr), + Return(""))); data_broker->SetTimeout(100); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -338,9 +349,9 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsIfBrokerAddressNotFound) { TEST_F(ServerDataBrokerTests, GetImageReturnsIfBrokerUriEmpty) { EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _, _)).Times(AtLeast(2)).WillRepeatedly(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); data_broker->SetTimeout(100); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -356,7 +367,8 @@ TEST_F(ServerDataBrokerTests, GetDoNotCallBrokerUriIfAlreadyFound) { data_broker->GetNext(&info, expected_group_id, nullptr); Mock::VerifyAndClearExpectations(&mock_http_client); - EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asap-broker"), _, _)).Times(0); + EXPECT_CALL(mock_http_client, + Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asap-broker"), _, _)).Times(0); MockGet("error_response"); data_broker->GetNext(&info, expected_group_id, nullptr); } @@ -378,9 +390,9 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsEofStreamFromHttpClientUntilTimeout MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).Times(AtLeast(2)).WillRepeatedly(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"""\"}"))); + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"""\"}"))); data_broker->SetTimeout(300); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -393,18 +405,17 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsNoDataAfterTimeoutEvenIfOtherErrorO data_broker->SetTimeout(300); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - 
Return("{\"op\":\"get_record_by_id\",\"id\":" + std::to_string(expected_dataset_id) + - ",\"id_max\":2,\"next_substream\":\"""\"}"))); - - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + "/" + std::to_string(expected_dataset_id) + "?token=" - + expected_token, _, _)).Times(AtLeast(1)).WillRepeatedly(DoAll( - SetArgPointee<1>(HttpCode::NotFound), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return("{\"op\":\"get_record_by_id\",\"id\":" + std::to_string(expected_dataset_id) + + ",\"id_max\":2,\"next_substream\":\"""\"}"))); + EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/" + + std::to_string(expected_dataset_id) + "?token=" + + expected_token, _, _)).Times(AtLeast(1)).WillRepeatedly(DoAll( + SetArgPointee<1>(HttpCode::NotFound), + SetArgPointee<2>(nullptr), + Return(""))); data_broker->SetTimeout(300); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -412,14 +423,13 @@ TEST_F(ServerDataBrokerTests, GetImageReturnsNoDataAfterTimeoutEvenIfOtherErrorO ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kNoData)); } - TEST_F(ServerDataBrokerTests, GetNextImageReturnsImmediatelyOnTransferError) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::InternalServerError), - SetArgPointee<2>(asapo::HttpErrorTemplates::kTransferError.Generate("sss").release()), - Return(""))); + SetArgPointee<1>(HttpCode::InternalServerError), + SetArgPointee<2>(asapo::HttpErrorTemplates::kTransferError.Generate("sss").release()), + Return(""))); data_broker->SetTimeout(300); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -428,23 +438,21 @@ TEST_F(ServerDataBrokerTests, GetNextImageReturnsImmediatelyOnTransferError) { ASSERT_THAT(err->Explain(), HasSubstr("sss")); } - ACTION(AssignArg2) { *arg2 = asapo::HttpErrorTemplates::kConnectionError.Generate().release(); } - TEST_F(ServerDataBrokerTests, GetNextRetriesIfConnectionHttpClientErrorUntilTimeout) { EXPECT_CALL(mock_http_client, Get_t(HasSubstr(expected_server_uri + "/asapo-discovery/asapo-broker"), _, _)).Times(AtLeast(2)).WillRepeatedly(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(expected_broker_uri))); + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(expected_broker_uri))); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).Times(AtLeast(2)).WillRepeatedly(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - AssignArg2(), - Return(""))); + SetArgPointee<1>(HttpCode::Conflict), + AssignArg2(), + Return(""))); data_broker->SetTimeout(300); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -456,9 +464,9 @@ TEST_F(ServerDataBrokerTests, GetNextImageReturnsImmediatelyOnFinshedSubstream) MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(HasSubstr("next"), _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - Return("{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_substream\":\"next\"}"))); + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return("{\"op\":\"get_record_by_id\",\"id\":2,\"id_max\":2,\"next_substream\":\"next\"}"))); data_broker->SetTimeout(300); auto err = data_broker->GetNext(&info, expected_group_id, nullptr); @@ -543,7 +551,6 @@ 
TEST_F(ServerDataBrokerTests, GetImageCallsReadFromFileIfZeroBufId) { FileData data; - EXPECT_CALL(mock_netclient, GetData_t(_, _)).Times(0); MockReadDataFromFile(); @@ -551,14 +558,13 @@ TEST_F(ServerDataBrokerTests, GetImageCallsReadFromFileIfZeroBufId) { data_broker->GetNext(&info, expected_group_id, &data); } - TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsErrorCreateGroup) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Post_t(HasSubstr("creategroup"), _, "", _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::BadRequest), - SetArgPointee<4>(nullptr), - Return(""))); + SetArgPointee<3>(HttpCode::BadRequest), + SetArgPointee<4>(nullptr), + Return(""))); data_broker->SetTimeout(100); asapo::Error err; @@ -567,15 +573,14 @@ TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsErrorCreateGroup) { ASSERT_THAT(groupid, Eq("")); } - TEST_F(ServerDataBrokerTests, GenerateNewGroupIdReturnsGroupID) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/creategroup?token=" + expected_token, _, "", _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(expected_group_id))); + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return(expected_group_id))); data_broker->SetTimeout(100); asapo::Error err; @@ -588,12 +593,13 @@ TEST_F(ServerDataBrokerTests, ResetCounterByDefaultUsesCorrectUri) { MockGetBrokerUri(); data_broker->SetTimeout(100); - EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + - "/resetcounter?token=" + expected_token + "&value=0", _, _, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(""))); + EXPECT_CALL(mock_http_client, + Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + + expected_group_id + + "/resetcounter?token=" + expected_token + "&value=0", _, _, _, _)).WillOnce(DoAll( + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return(""))); auto err = data_broker->ResetLastReadMarker(expected_group_id); ASSERT_THAT(err, Eq(nullptr)); } @@ -602,28 +608,28 @@ TEST_F(ServerDataBrokerTests, ResetCounterUsesCorrectUri) { MockGetBrokerUri(); data_broker->SetTimeout(100); - EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + - "/resetcounter?token=" + expected_token + "&value=10", _, _, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(""))); + EXPECT_CALL(mock_http_client, + Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + + expected_group_id + + "/resetcounter?token=" + expected_token + "&value=10", _, _, _, _)).WillOnce(DoAll( + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return(""))); auto err = data_broker->SetLastReadMarker(10, expected_group_id); ASSERT_THAT(err, Eq(nullptr)); } - TEST_F(ServerDataBrokerTests, ResetCounterUsesCorrectUriWithSubstream) { MockGetBrokerUri(); data_broker->SetTimeout(100); EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" + - expected_substream + "/" + - expected_group_id + - "/resetcounter?token=" + expected_token + "&value=10", _, _, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(""))); + expected_substream + "/" + + expected_group_id + + "/resetcounter?token=" + expected_token + 
"&value=10", _, _, _, _)).WillOnce(DoAll( + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return(""))); auto err = data_broker->SetLastReadMarker(10, expected_group_id, expected_substream); ASSERT_THAT(err, Eq(nullptr)); } @@ -633,11 +639,11 @@ TEST_F(ServerDataBrokerTests, GetCurrentSizeUsesCorrectUri) { data_broker->SetTimeout(100); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + - "/default/size?token=" - + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return("{\"size\":10}"))); + "/default/size?token=" + + expected_token, _, _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return("{\"size\":10}"))); asapo::Error err; auto size = data_broker->GetCurrentSize(&err); ASSERT_THAT(err, Eq(nullptr)); @@ -649,45 +655,43 @@ TEST_F(ServerDataBrokerTests, GetCurrentSizeUsesCorrectUriWithSubstream) { data_broker->SetTimeout(100); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" + - expected_substream + "/size?token=" - + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return("{\"size\":10}"))); + expected_substream + "/size?token=" + + expected_token, _, _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return("{\"size\":10}"))); asapo::Error err; auto size = data_broker->GetCurrentSize(expected_substream, &err); ASSERT_THAT(err, Eq(nullptr)); ASSERT_THAT(size, Eq(10)); } - TEST_F(ServerDataBrokerTests, GetCurrentSizeErrorOnWrongResponce) { MockGetBrokerUri(); data_broker->SetTimeout(100); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + - "/default/size?token=" - + expected_token, _, _)).WillRepeatedly(DoAll( - SetArgPointee<1>(HttpCode::Unauthorized), - SetArgPointee<2>(nullptr), - Return(""))); + "/default/size?token=" + + expected_token, _, _)).WillRepeatedly(DoAll( + SetArgPointee<1>(HttpCode::Unauthorized), + SetArgPointee<2>(nullptr), + Return(""))); asapo::Error err; auto size = data_broker->GetCurrentSize(&err); ASSERT_THAT(err, Ne(nullptr)); ASSERT_THAT(size, Eq(0)); } - TEST_F(ServerDataBrokerTests, GetNDataErrorOnWrongParse) { MockGetBrokerUri(); data_broker->SetTimeout(100); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + - "/default/size?token=" - + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return("{\"siz\":10}"))); + "/default/size?token=" + + expected_token, _, _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return("{\"siz\":10}"))); asapo::Error err; auto size = data_broker->GetCurrentSize(&err); ASSERT_THAT(err, Ne(nullptr)); @@ -700,35 +704,33 @@ TEST_F(ServerDataBrokerTests, GetByIdUsesCorrectUri) { auto to_send = CreateFI(); auto json = to_send.Json(); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id - + "/" + std::to_string( - expected_dataset_id) + "?token=" - + expected_token, _, + EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/" + + std::to_string( + expected_dataset_id) + "?token=" + + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(json))); 
+ SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(json))); - auto err = data_broker->GetById(expected_dataset_id, &info, expected_group_id, nullptr); + auto err = data_broker->GetById(expected_dataset_id, &info, nullptr); ASSERT_THAT(err, Eq(nullptr)); ASSERT_THAT(info.name, Eq(to_send.name)); } - TEST_F(ServerDataBrokerTests, GetByIdTimeouts) { MockGetBrokerUri(); data_broker->SetTimeout(10); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + "/" + std::to_string(expected_dataset_id) + "?token=" - + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - Return(""))); + EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/" + + std::to_string(expected_dataset_id) + "?token=" + + expected_token, _, _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return(""))); - auto err = data_broker->GetById(expected_dataset_id, &info, expected_group_id, nullptr); + auto err = data_broker->GetById(expected_dataset_id, &info, nullptr); ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kNoData)); } @@ -737,15 +739,14 @@ TEST_F(ServerDataBrokerTests, GetByIdReturnsEndOfStream) { MockGetBrokerUri(); data_broker->SetTimeout(10); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + "/" + std::to_string(expected_dataset_id) + "?token=" - + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"""\"}"))); - + EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/" + + std::to_string(expected_dataset_id) + "?token=" + + expected_token, _, _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return("{\"op\":\"get_record_by_id\",\"id\":1,\"id_max\":1,\"next_substream\":\"""\"}"))); - auto err = data_broker->GetById(expected_dataset_id, &info, expected_group_id, nullptr); + auto err = data_broker->GetById(expected_dataset_id, &info, nullptr); ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kEndOfStream)); } @@ -754,32 +755,29 @@ TEST_F(ServerDataBrokerTests, GetByIdReturnsEndOfStreamWhenIdTooLarge) { MockGetBrokerUri(); data_broker->SetTimeout(10); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + "/" + std::to_string(expected_dataset_id) + "?token=" - + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::Conflict), - SetArgPointee<2>(nullptr), - Return("{\"op\":\"get_record_by_id\",\"id\":100,\"id_max\":1,\"next_substream\":\"""\"}"))); - + EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/" + + std::to_string(expected_dataset_id) + "?token=" + + expected_token, _, _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::Conflict), + SetArgPointee<2>(nullptr), + Return("{\"op\":\"get_record_by_id\",\"id\":100,\"id_max\":1,\"next_substream\":\"""\"}"))); - auto err = data_broker->GetById(expected_dataset_id, &info, expected_group_id, nullptr); + auto err = data_broker->GetById(expected_dataset_id, &info, nullptr); ASSERT_THAT(err, 
Eq(asapo::ConsumerErrorTemplates::kEndOfStream)); } - TEST_F(ServerDataBrokerTests, GetMetaDataOK) { MockGetBrokerUri(); data_broker->SetTimeout(100); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + - "/default/0/meta/0?token=" - + expected_token, _, + "/default/0/meta/0?token=" + + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(expected_metadata))); + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(expected_metadata))); asapo::Error err; auto res = data_broker->GetBeamtimeMeta(&err); @@ -789,14 +787,13 @@ TEST_F(ServerDataBrokerTests, GetMetaDataOK) { } - TEST_F(ServerDataBrokerTests, QueryImagesReturnError) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::BadRequest), - SetArgPointee<4>(nullptr), - Return("error in query"))); + SetArgPointee<3>(HttpCode::BadRequest), + SetArgPointee<4>(nullptr), + Return("error in query"))); data_broker->SetTimeout(1000); asapo::Error err; @@ -807,14 +804,13 @@ TEST_F(ServerDataBrokerTests, QueryImagesReturnError) { ASSERT_THAT(images.size(), Eq(0)); } - TEST_F(ServerDataBrokerTests, QueryImagesReturnEmptyResults) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return("[]"))); + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return("[]"))); data_broker->SetTimeout(100); asapo::Error err; @@ -836,9 +832,9 @@ TEST_F(ServerDataBrokerTests, QueryImagesWrongResponseArray) { EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(responce_string))); + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return(responce_string))); data_broker->SetTimeout(100); asapo::Error err; @@ -855,11 +851,10 @@ TEST_F(ServerDataBrokerTests, QueryImagesWrongResponseRecorsd) { auto responce_string = R"([{"bla":1},{"err":}])"; - EXPECT_CALL(mock_http_client, Post_t(HasSubstr("queryimages"), _, expected_query_string, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(responce_string))); + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return(responce_string))); data_broker->SetTimeout(100); asapo::Error err; @@ -870,8 +865,6 @@ TEST_F(ServerDataBrokerTests, QueryImagesWrongResponseRecorsd) { ASSERT_THAT(err->Explain(), HasSubstr("response")); } - - TEST_F(ServerDataBrokerTests, QueryImagesReturnRecords) { MockGetBrokerUri(); @@ -883,12 +876,12 @@ TEST_F(ServerDataBrokerTests, QueryImagesReturnRecords) { auto json2 = rec2.Json(); auto responce_string = "[" + json1 + "," + json2 + "]"; - - EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0" + - "/queryimages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(responce_string))); + EXPECT_CALL(mock_http_client, + Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0" + + "/queryimages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll( + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + 
Return(responce_string))); data_broker->SetTimeout(100); asapo::Error err; @@ -906,11 +899,11 @@ TEST_F(ServerDataBrokerTests, QueryImagesUsesCorrectUriWithSubstream) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" + - expected_substream + "/0" + - "/queryimages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return("[]"))); + expected_substream + "/0" + + "/queryimages?token=" + expected_token, _, expected_query_string, _, _)).WillOnce(DoAll( + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return("[]"))); data_broker->SetTimeout(100); asapo::Error err; @@ -924,17 +917,16 @@ TEST_F(ServerDataBrokerTests, GetNextDatasetUsesCorrectUri) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + "/next?token=" - + expected_token + "&dataset=true", _, + expected_group_id + "/next?token=" + + expected_token + "&dataset=true&minsize=0", _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); asapo::Error err; - data_broker->GetNextDataset(expected_group_id, &err); + data_broker->GetNextDataset(expected_group_id, 0, &err); } - TEST_F(ServerDataBrokerTests, GetDataSetReturnsFileInfos) { asapo::Error err; MockGetBrokerUri(); @@ -949,14 +941,14 @@ TEST_F(ServerDataBrokerTests, GetDataSetReturnsFileInfos) { auto json3 = to_send3.Json(); auto json = std::string("{") + - "\"_id\":1," + - "\"size\":3," + - "\"images\":[" + json1 + "," + json2 + "," + json3 + "]" + - "}"; + "\"_id\":1," + + "\"size\":3," + + "\"images\":[" + json1 + "," + json2 + "," + json3 + "]" + + "}"; MockGet(json); - auto dataset = data_broker->GetNextDataset(expected_group_id, &err); + auto dataset = data_broker->GetNextDataset(expected_group_id, 0, &err); ASSERT_THAT(err, Eq(nullptr)); @@ -967,12 +959,81 @@ TEST_F(ServerDataBrokerTests, GetDataSetReturnsFileInfos) { ASSERT_THAT(dataset.content[2].id, Eq(to_send3.id)); } +TEST_F(ServerDataBrokerTests, GetDataSetReturnsPartialFileInfos) { + asapo::Error err; + MockGetBrokerUri(); + + auto to_send1 = CreateFI(); + auto json1 = to_send1.Json(); + auto to_send2 = CreateFI(); + to_send2.id = 2; + auto json2 = to_send2.Json(); + auto to_send3 = CreateFI(); + to_send3.id = 3; + auto json3 = to_send3.Json(); + + auto json = std::string("{") + + "\"_id\":1," + + "\"size\":3," + + "\"images\":[" + json1 + "," + json2 + "]" + + "}"; + + MockGet(json, asapo::HttpCode::PartialContent); + + auto dataset = data_broker->GetNextDataset(expected_group_id, 0, &err); + + ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kPartialData)); + + auto err_data = static_cast<const asapo::PartialErrorData*>(err->GetCustomData()); + ASSERT_THAT(err_data->expected_size, Eq(3)); + ASSERT_THAT(err_data->id, Eq(1)); + + ASSERT_THAT(dataset.id, Eq(1)); + ASSERT_THAT(dataset.content.size(), Eq(2)); + ASSERT_THAT(dataset.content[0].id, Eq(to_send1.id)); + ASSERT_THAT(dataset.content[1].id, Eq(to_send2.id)); +} + +TEST_F(ServerDataBrokerTests, GetDataSetByIdReturnsPartialFileInfos) { + asapo::Error err; + MockGetBrokerUri(); + + auto to_send1 = CreateFI(); + auto json1 = to_send1.Json(); + auto to_send2 = CreateFI(); + to_send2.id = 2; + auto json2 = to_send2.Json(); + auto to_send3 = CreateFI(); + 
to_send3.id = 3; + auto json3 = to_send3.Json(); + + auto json = std::string("{") + + "\"_id\":1," + + "\"size\":3," + + "\"images\":[" + json1 + "," + json2 + "]" + + "}"; + + MockGet(json, asapo::HttpCode::PartialContent); + + auto dataset = data_broker->GetDatasetById(1, 0, &err); + + ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kPartialData)); + auto err_data = static_cast<const asapo::PartialErrorData*>(err->GetCustomData()); + ASSERT_THAT(err_data->expected_size, Eq(3)); + ASSERT_THAT(err_data->id, Eq(1)); + + ASSERT_THAT(dataset.id, Eq(1)); + ASSERT_THAT(dataset.content.size(), Eq(2)); + ASSERT_THAT(dataset.content[0].id, Eq(to_send1.id)); + ASSERT_THAT(dataset.content[1].id, Eq(to_send2.id)); +} + TEST_F(ServerDataBrokerTests, GetDataSetReturnsParseError) { MockGetBrokerUri(); MockGet("error_response"); asapo::Error err; - auto dataset = data_broker->GetNextDataset(expected_group_id, &err); + auto dataset = data_broker->GetNextDataset(expected_group_id, 0, &err); ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kInterruptedTransaction)); ASSERT_THAT(dataset.content.size(), Eq(0)); @@ -983,60 +1044,59 @@ TEST_F(ServerDataBrokerTests, GetDataSetReturnsParseError) { TEST_F(ServerDataBrokerTests, GetLastDatasetUsesCorrectUri) { MockGetBrokerUri(); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + "/last?token=" - + expected_token + "&dataset=true", _, - _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); + EXPECT_CALL(mock_http_client, + Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/last?token=" + + expected_token + "&dataset=true&minsize=2", _, + _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); asapo::Error err; - data_broker->GetLastDataset(expected_group_id, &err); + data_broker->GetLastDataset(2, &err); } TEST_F(ServerDataBrokerTests, GetLastDatasetUsesCorrectUriWithSubstream) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" + - expected_substream + "/" + - expected_group_id + "/last?token=" - + expected_token + "&dataset=true", _, + expected_substream + "/0/last?token=" + + expected_token + "&dataset=true&minsize=1", _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); asapo::Error err; - data_broker->GetLastDataset(expected_group_id, expected_substream, &err); + data_broker->GetLastDataset(expected_substream, 1, &err); } - TEST_F(ServerDataBrokerTests, GetDatasetByIdUsesCorrectUri) { MockGetBrokerUri(); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id + - "/" + std::to_string(expected_dataset_id) + "?token=" - + expected_token + "&dataset=true", _, + EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/0/" + + std::to_string(expected_dataset_id) + "?token=" + + expected_token + "&dataset=true" + "&minsize=0", _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); asapo::Error err; - data_broker->GetDatasetById(expected_dataset_id, expected_group_id, &err); + 
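// Illustrative sketch, not part of the patch: as the partial-data tests here document,
// kPartialData is a soft failure and the returned DataSet is populated with whatever
// images have arrived. A tolerant consumer loop might therefore look like this
// (names assumed):
//
//   asapo::Error err;
//   auto set = broker->GetDatasetById(1, 0, &err);
//   if (err == nullptr || err == asapo::ConsumerErrorTemplates::kPartialData) {
//       for (const auto& fi : set.content) {
//           // process fi.name, fi.id, ...
//       }
//   }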
data_broker->GetDatasetById(expected_dataset_id, 0, &err); } TEST_F(ServerDataBrokerTests, GetSubstreamListUsesCorrectUri) { MockGetBrokerUri(); - std::string return_substreams = R"({"substreams":[{"lastId":123,"name":"test","timestampCreated":1000000},{"name":"test1","timestampCreated":2000000}]})"; - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/0/substreams" - + "?token=" + expected_token+"&from=stream_from", _, - _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(return_substreams))); + std::string return_substreams = + R"({"substreams":[{"lastId":123,"name":"test","timestampCreated":1000000},{"name":"test1","timestampCreated":2000000}]})"; + EXPECT_CALL(mock_http_client, + Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/0/substreams" + + "?token=" + expected_token + "&from=stream_from", _, + _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(return_substreams))); asapo::Error err; - auto substreams = data_broker->GetSubstreamList("stream_from",&err); + auto substreams = data_broker->GetSubstreamList("stream_from", &err); ASSERT_THAT(err, Eq(nullptr)); ASSERT_THAT(substreams.size(), Eq(2)); ASSERT_THAT(substreams.size(), 2); @@ -1044,21 +1104,20 @@ TEST_F(ServerDataBrokerTests, GetSubstreamListUsesCorrectUri) { ASSERT_THAT(substreams[1].Json(false), R"({"name":"test1","timestampCreated":2000000})"); } - TEST_F(ServerDataBrokerTests, GetSubstreamListUsesCorrectUriWithoutFrom) { MockGetBrokerUri(); - EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/0/substreams" - + "?token=" + expected_token, _, - _)).WillOnce(DoAll( + EXPECT_CALL(mock_http_client, + Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/0/substreams" + + "?token=" + expected_token, _, + _)).WillOnce(DoAll( SetArgPointee<1>(HttpCode::OK), SetArgPointee<2>(nullptr), Return("")));; asapo::Error err; - auto substreams = data_broker->GetSubstreamList("",&err); + auto substreams = data_broker->GetSubstreamList("", &err); } - void ServerDataBrokerTests::MockBeforeFTS(FileData* data) { auto to_send = CreateFI(); auto json = to_send.Json(); @@ -1070,21 +1129,21 @@ void ServerDataBrokerTests::MockBeforeFTS(FileData* data) { void ServerDataBrokerTests::ExpectFolderToken() { std::string expected_folder_query_string = "{\"Folder\":\"" + expected_path + "\",\"BeamtimeId\":\"" + - expected_beamtime_id - + "\",\"Token\":\"" + expected_token + "\"}"; + expected_beamtime_id + + "\",\"Token\":\"" + expected_token + "\"}"; EXPECT_CALL(mock_http_client, Post_t(HasSubstr(expected_server_uri + "/asapo-authorizer/folder"), _, expected_folder_query_string, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(expected_folder_token) - )); + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return(expected_folder_token) + )); } ACTION_P(AssignArg3, assign) { if (assign) { - asapo::FileData data = asapo::FileData{new uint8_t[1] }; + asapo::FileData data = asapo::FileData{new uint8_t[1]}; data[0] = expected_value; *arg3 = std::move(data); } @@ -1092,27 +1151,35 @@ ACTION_P(AssignArg3, assign) { void ServerDataBrokerTests::ExpectFileTransfer(const asapo::ConsumerErrorTemplate* p_err_template) { EXPECT_CALL(mock_http_client, PostReturnArray_t(HasSubstr(expected_fts_uri + "/transfer"), - expected_cookie, expected_fts_query_string, _, expected_image_size, 
_)).WillOnce(DoAll( - SetArgPointee<5>(HttpCode::OK), - AssignArg3(p_err_template == nullptr), - Return(p_err_template == nullptr ? nullptr : p_err_template->Generate().release()) - )); + expected_cookie, + expected_fts_query_string, + _, + expected_image_size, + _)).WillOnce(DoAll( + SetArgPointee<5>(HttpCode::OK), + AssignArg3(p_err_template == nullptr), + Return(p_err_template == nullptr ? nullptr : p_err_template->Generate().release()) + )); } void ServerDataBrokerTests::ExpectRepeatedFileTransfer() { EXPECT_CALL(mock_http_client, PostReturnArray_t(HasSubstr(expected_fts_uri + "/transfer"), - expected_cookie, expected_fts_query_string, _, expected_image_size, _)). - WillOnce(DoAll( - SetArgPointee<5>(HttpCode::Unauthorized), - Return(nullptr))). - WillOnce(DoAll( - SetArgPointee<5>(HttpCode::OK), - Return(nullptr) - )); + expected_cookie, + expected_fts_query_string, + _, + expected_image_size, + _)). + WillOnce(DoAll( + SetArgPointee<5>(HttpCode::Unauthorized), + Return(nullptr))). + WillOnce(DoAll( + SetArgPointee<5>(HttpCode::OK), + Return(nullptr) + )); } void ServerDataBrokerTests::AssertSingleFileTransfer() { - asapo::FileData data = asapo::FileData{new uint8_t[1] }; + asapo::FileData data = asapo::FileData{new uint8_t[1]}; MockGetBrokerUri(); MockBeforeFTS(&data); ExpectFolderToken(); @@ -1127,7 +1194,6 @@ void ServerDataBrokerTests::AssertSingleFileTransfer() { Mock::VerifyAndClearExpectations(&mock_io); } - TEST_F(ServerDataBrokerTests, GetImageUsesFileTransferServiceIfCannotReadFromCache) { AssertSingleFileTransfer(); } @@ -1137,28 +1203,32 @@ TEST_F(ServerDataBrokerTests, FileTransferReadsFileSize) { EXPECT_CALL(mock_http_client, Post_t(HasSubstr("sizeonly=true"), expected_cookie, expected_fts_query_string, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return("{\"file_size\":5}") - )); + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return("{\"file_size\":5}") + )); EXPECT_CALL(mock_http_client, PostReturnArray_t(HasSubstr(expected_fts_uri + "/transfer"), - expected_cookie, expected_fts_query_string, _, 5, _)).WillOnce(DoAll( - SetArgPointee<5>(HttpCode::OK), - AssignArg3(nullptr), - Return(nullptr) - )); + expected_cookie, + expected_fts_query_string, + _, + 5, + _)).WillOnce(DoAll( + SetArgPointee<5>(HttpCode::OK), + AssignArg3(nullptr), + Return(nullptr) + )); FileData data; info.size = 0; info.buf_id = 0; - auto err = fts_data_broker->RetrieveData(&info, &data); + auto err = fts_data_broker->RetrieveData(&info, &data); } TEST_F(ServerDataBrokerTests, GetImageReusesTokenAndUri) { AssertSingleFileTransfer(); - asapo::FileData data = asapo::FileData{new uint8_t[1] }; + asapo::FileData data = asapo::FileData{new uint8_t[1]}; MockBeforeFTS(&data); ExpectFileTransfer(nullptr); @@ -1180,30 +1250,30 @@ TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUri) { MockGetBrokerUri(); auto expected_acknowledge_command = "{\"Op\":\"ackimage\"}"; EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" + - expected_substream + "/" + - expected_group_id - + "/" + std::to_string(expected_dataset_id) + "?token=" - + expected_token, _, expected_acknowledge_command, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(""))); + expected_substream + "/" + + expected_group_id + + "/" + std::to_string(expected_dataset_id) + "?token=" + + expected_token, _, expected_acknowledge_command, _, _)).WillOnce(DoAll( + SetArgPointee<3>(HttpCode::OK), + 
SetArgPointee<4>(nullptr), + Return(""))); auto err = data_broker->Acknowledge(expected_group_id, expected_dataset_id, expected_substream); ASSERT_THAT(err, Eq(nullptr)); } - TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUriWithDefaultSubStream) { MockGetBrokerUri(); auto expected_acknowledge_command = "{\"Op\":\"ackimage\"}"; - EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + - expected_group_id - + "/" + std::to_string(expected_dataset_id) + "?token=" - + expected_token, _, expected_acknowledge_command, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(""))); + EXPECT_CALL(mock_http_client, + Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" + + expected_group_id + + "/" + std::to_string(expected_dataset_id) + "?token=" + + expected_token, _, expected_acknowledge_command, _, _)).WillOnce(DoAll( + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return(""))); auto err = data_broker->Acknowledge(expected_group_id, expected_dataset_id); @@ -1213,11 +1283,11 @@ TEST_F(ServerDataBrokerTests, AcknowledgeUsesCorrectUriWithDefaultSubStream) { void ServerDataBrokerTests::ExpectIdList(bool error) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" + - expected_substream + "/" + - expected_group_id + "/nacks?token=" + expected_token + "&from=1&to=0", _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(error ? "" : "{\"unacknowledged\":[1,2,3]}"))); + expected_substream + "/" + + expected_group_id + "/nacks?token=" + expected_token + "&from=1&to=0", _, _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(error ? "" : "{\"unacknowledged\":[1,2,3]}"))); } TEST_F(ServerDataBrokerTests, GetUnAcknowledgedListReturnsIds) { @@ -1229,17 +1299,15 @@ TEST_F(ServerDataBrokerTests, GetUnAcknowledgedListReturnsIds) { ASSERT_THAT(err, Eq(nullptr)); } - void ServerDataBrokerTests::ExpectLastAckId(bool empty_response) { EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" + - expected_substream + "/" + - expected_group_id + "/lastack?token=" + expected_token, _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(empty_response ? "{\"lastAckId\":0}" : "{\"lastAckId\":1}"))); + expected_substream + "/" + + expected_group_id + "/lastack?token=" + expected_token, _, _)).WillOnce(DoAll( + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(empty_response ? 
"{\"lastAckId\":0}" : "{\"lastAckId\":1}"))); } - TEST_F(ServerDataBrokerTests, GetLastAcknowledgeUsesOk) { MockGetBrokerUri(); ExpectLastAckId(false); @@ -1262,7 +1330,7 @@ TEST_F(ServerDataBrokerTests, GetLastAcknowledgeReturnsNoData) { TEST_F(ServerDataBrokerTests, GetByIdErrorsForId0) { - auto err = data_broker->GetById(0, &info, expected_group_id, nullptr); + auto err = data_broker->GetById(0, &info, nullptr); ASSERT_THAT(err, Eq(asapo::ConsumerErrorTemplates::kWrongInput)); } @@ -1271,29 +1339,29 @@ TEST_F(ServerDataBrokerTests, ResendNacks) { MockGetBrokerUri(); EXPECT_CALL(mock_http_client, Get_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/default/" - + expected_group_id + "/next?token=" - + expected_token + "&resend_nacks=true&delay_sec=10&resend_attempts=3", _, + + expected_group_id + "/next?token=" + + expected_token + "&resend_nacks=true&delay_sec=10&resend_attempts=3", _, _)).WillOnce(DoAll( - SetArgPointee<1>(HttpCode::OK), - SetArgPointee<2>(nullptr), - Return(""))); + SetArgPointee<1>(HttpCode::OK), + SetArgPointee<2>(nullptr), + Return(""))); data_broker->SetResendNacs(true, 10, 3); data_broker->GetNext(&info, expected_group_id, nullptr); } - TEST_F(ServerDataBrokerTests, NegativeAcknowledgeUsesCorrectUri) { MockGetBrokerUri(); auto expected_neg_acknowledge_command = R"({"Op":"negackimage","Params":{"DelaySec":10}})"; EXPECT_CALL(mock_http_client, Post_t(expected_broker_uri + "/database/beamtime_id/" + expected_stream + "/" + - expected_substream + "/" + - expected_group_id - + "/" + std::to_string(expected_dataset_id) + "?token=" - + expected_token, _, expected_neg_acknowledge_command, _, _)).WillOnce(DoAll( - SetArgPointee<3>(HttpCode::OK), - SetArgPointee<4>(nullptr), - Return(""))); + expected_substream + "/" + + expected_group_id + + "/" + std::to_string(expected_dataset_id) + "?token=" + + expected_token, _, expected_neg_acknowledge_command, _, _)).WillOnce( + DoAll( + SetArgPointee<3>(HttpCode::OK), + SetArgPointee<4>(nullptr), + Return(""))); auto err = data_broker->NegativeAcknowledge(expected_group_id, expected_dataset_id, 10, expected_substream); diff --git a/consumer/api/python/asapo_consumer.pxd b/consumer/api/python/asapo_consumer.pxd index 867139750dad8c31963e0efd8effa2525636b2dc..34450e0e146d167b08f7e69c988ccb2e3aac3acd 100644 --- a/consumer/api/python/asapo_consumer.pxd +++ b/consumer/api/python/asapo_consumer.pxd @@ -39,6 +39,7 @@ cdef extern from "asapo_consumer.h" namespace "asapo": vector[FileInfo].iterator end() struct DataSet: uint64_t id + uint64_t expected_size FileInfos content struct SourceCredentials: string beamtime_id @@ -62,8 +63,8 @@ cdef extern from "asapo_consumer.h" namespace "asapo" nogil: void ForceNoRdma() NetworkConnectionType CurrentConnectionType() Error GetNext(FileInfo* info, string group_id,string substream, FileData* data) - Error GetLast(FileInfo* info, string group_id,string substream, FileData* data) - Error GetById(uint64_t id, FileInfo* info, string group_id, string substream, FileData* data) + Error GetLast(FileInfo* info, string substream, FileData* data) + Error GetById(uint64_t id, FileInfo* info, string substream, FileData* data) uint64_t GetCurrentSize(string substream, Error* err) Error SetLastReadMarker(uint64_t value, string group_id, string substream) Error ResetLastReadMarker(string group_id, string substream) @@ -74,9 +75,9 @@ cdef extern from "asapo_consumer.h" namespace "asapo" nogil: string GenerateNewGroupId(Error* err) string GetBeamtimeMeta(Error* err) FileInfos QueryImages(string 
query, string substream, Error* err) - DataSet GetNextDataset(string group_id, string substream, Error* err) - DataSet GetLastDataset(string group_id, string substream, Error* err) - DataSet GetDatasetById(uint64_t id, string group_id, string substream, Error* err) + DataSet GetNextDataset(string group_id, string substream, uint64_t min_size, Error* err) + DataSet GetLastDataset(string substream, uint64_t min_size, Error* err) + DataSet GetDatasetById(uint64_t id, string substream, uint64_t min_size, Error* err) Error RetrieveData(FileInfo* info, FileData* data) vector[StreamInfo] GetSubstreamList(string from_substream, Error* err) void SetResendNacs(bool resend, uint64_t delay_sec, uint64_t resend_attempts) @@ -96,6 +97,8 @@ cdef extern from "asapo_consumer.h" namespace "asapo": ErrorTemplateInterface kInterruptedTransaction "asapo::ConsumerErrorTemplates::kInterruptedTransaction" ErrorTemplateInterface kLocalIOError "asapo::ConsumerErrorTemplates::kLocalIOError" ErrorTemplateInterface kWrongInput "asapo::ConsumerErrorTemplates::kWrongInput" + ErrorTemplateInterface kPartialData "asapo::ConsumerErrorTemplates::kPartialData" + cdef cppclass ConsumerErrorData: uint64_t id uint64_t id_max diff --git a/consumer/api/python/asapo_consumer.pyx.in b/consumer/api/python/asapo_consumer.pyx.in index 0022eef0a3f6e76122e0640efdd3bc23aea52f8c..67de7608c905b0403afb64eab15311a1d69cdf87 100644 --- a/consumer/api/python/asapo_consumer.pyx.in +++ b/consumer/api/python/asapo_consumer.pyx.in @@ -52,19 +52,23 @@ class AsapoStreamFinishedError(AsapoConsumerError): self.id_max = id_max self.next_substream = _str(next_substream) - class AsapoEndOfStreamError(AsapoConsumerError): def __init__(self,message,id_max=None): AsapoConsumerError.__init__(self,message) self.id_max = id_max +class AsapoPartialDataError(AsapoConsumerError): + def __init__(self,message,partial_data): + AsapoConsumerError.__init__(self,message) + self.partial_data = partial_data + class AsapoNoDataError(AsapoConsumerError): def __init__(self,message,id=None,id_max=None): AsapoConsumerError.__init__(self,message) self.id_max = id_max self.id = id -cdef throw_exception(Error& err): +cdef throw_exception(Error& err, res = None): cdef ConsumerErrorData* data error_string = _str(err.get().Explain()) if err == kEndOfStream: @@ -85,6 +89,8 @@ cdef throw_exception(Error& err): raise AsapoNoDataError(error_string,data.id,data.id_max) else: raise AsapoNoDataError(error_string) + elif err == kPartialData: + raise AsapoPartialDataError(error_string, res) elif err == kWrongInput: raise AsapoWrongInputError(error_string) elif err == kLocalIOError: @@ -111,10 +117,10 @@ cdef class PyDataBroker: err = self.c_broker.GetNext(&info, b_group_id,b_substream, p_data) elif op == "last": with nogil: - err = self.c_broker.GetLast(&info, b_group_id,b_substream, p_data) + err = self.c_broker.GetLast(&info, b_substream, p_data) elif op == "id": with nogil: - err = self.c_broker.GetById(id, &info, b_group_id,b_substream, p_data) + err = self.c_broker.GetById(id, &info, b_substream, p_data) if err: throw_exception(err) info_str = _str(info.Json()) @@ -128,10 +134,10 @@ cdef class PyDataBroker: return arr,meta def get_next(self, group_id, substream = "default", meta_only = True): return self._op("next",group_id,substream,meta_only,0) - def get_last(self, group_id, substream = "default", meta_only = True): - return self._op("last",group_id,substream,meta_only,0) - def get_by_id(self,uint64_t id,group_id, substream = "default",meta_only = True): - return 
self._op("id",group_id,substream,meta_only,id) + def get_last(self, substream = "default", meta_only = True): + return self._op("last","",substream,meta_only,0) + def get_by_id(self,uint64_t id,substream = "default",meta_only = True): + return self._op("id","",substream,meta_only,id) def retrieve_data(self,meta): json_str = json.dumps(meta) cdef FileInfo info @@ -273,7 +279,7 @@ cdef class PyDataBroker: for fi in file_infos: json_list.append(json.loads(_str(fi.Json()))) return json_list - def _op_dataset(self, op, group_id, substream, uint64_t id): + def _op_dataset(self, op, group_id, substream, uint64_t min_size, uint64_t id): cdef string b_group_id = _bytes(group_id) cdef string b_substream = _bytes(substream) cdef FileInfos file_infos @@ -281,25 +287,26 @@ cdef class PyDataBroker: cdef Error err if op == "next": with nogil: - dataset = self.c_broker.GetNextDataset(b_group_id,b_substream, &err) + dataset = self.c_broker.GetNextDataset(b_group_id,b_substream, min_size, &err) elif op == "last": with nogil: - dataset = self.c_broker.GetLastDataset(b_group_id,b_substream, &err) + dataset = self.c_broker.GetLastDataset(b_substream, min_size, &err) elif op == "id": with nogil: - dataset = self.c_broker.GetDatasetById(id, b_group_id,b_substream, &err) - if err: - throw_exception(err) + dataset = self.c_broker.GetDatasetById(id, b_substream, min_size, &err) json_list = [] for fi in dataset.content: json_list.append(json.loads(_str(fi.Json()))) - return dataset.id, json_list - def get_next_dataset(self, group_id, substream = "default"): - return self._op_dataset("next",group_id,substream,0) - def get_last_dataset(self, group_id, substream = "default"): - return self._op_dataset("last",group_id,substream,0) - def get_dataset_by_id(self, uint64_t id, group_id, substream = "default"): - return self._op_dataset("id",group_id,substream,id) + res={'id':dataset.id,'expected_size':dataset.expected_size,'content':json_list} + if err: + throw_exception(err,res) + return res + def get_next_dataset(self, group_id, substream = "default", min_size = 0): + return self._op_dataset("next",group_id,substream,min_size,0) + def get_last_dataset(self, substream = "default", min_size = 0): + return self._op_dataset("last","0",substream,min_size,0) + def get_dataset_by_id(self, uint64_t id, substream = "default", min_size = 0): + return self._op_dataset("id","0",substream,min_size,id) def get_beamtime_meta(self): cdef Error err cdef string meta_str diff --git a/examples/consumer/getnext_broker/getnext_broker.cpp b/examples/consumer/getnext_broker/getnext_broker.cpp index 51c5e1694927cdafce88f13e27a3bcae202b497e..e8c9a826e496d8b2b0569ea8fe4f1ae6a3bb0309 100644 --- a/examples/consumer/getnext_broker/getnext_broker.cpp +++ b/examples/consumer/getnext_broker/getnext_broker.cpp @@ -132,7 +132,7 @@ StartThreads(const Args& params, std::vector<int>* nfiles, std::vector<int>* err bool isFirstFile = true; while (true) { if (params.datasets) { - auto dataset = broker->GetNextDataset(group_id, &err); + auto dataset = broker->GetNextDataset(group_id, 0, &err); if (err == nullptr) { for (auto& fi : dataset.content) { (*nbuf)[i] += fi.buf_id == 0 ? 
0 : 1; diff --git a/examples/producer/dummy-data-producer/dummy_data_producer.cpp b/examples/producer/dummy-data-producer/dummy_data_producer.cpp index d5c522d30ad66fde5e4c9b55e803709f2967a2c9..4cd4043ea2567a8faa9e6721c3b27d15d4c61ecb 100644 --- a/examples/producer/dummy-data-producer/dummy_data_producer.cpp +++ b/examples/producer/dummy-data-producer/dummy_data_producer.cpp @@ -163,9 +163,9 @@ bool SendDummyData(asapo::Producer* producer, size_t number_of_byte, uint64_t it } else { for (uint64_t id = 0; id < images_in_set; id++) { auto buffer = CreateMemoryBuffer(number_of_byte); - event_header.subset_id = i + 1; + event_header.id_in_subset = id + 1; event_header.subset_size = images_in_set; - event_header.file_id = id + 1; + event_header.file_id = i + 1; event_header.file_name = std::to_string(i + 1) + "_" + std::to_string(id + 1); if (!stream.empty()) { event_header.file_name = stream + "/" + event_header.file_name; diff --git a/producer/api/cpp/include/producer/common.h b/producer/api/cpp/include/producer/common.h index a1d6fd56f707ad963e9f8ba2392b3bde5ec4aeb4..b4a8892ff7aa478eab2c22ebb7c41e99c535a52e 100644 --- a/producer/api/cpp/include/producer/common.h +++ b/producer/api/cpp/include/producer/common.h @@ -30,18 +30,18 @@ struct EventHeader { EventHeader() {}; EventHeader(uint64_t file_id_i, uint64_t file_size_i, std::string file_name_i, std::string user_metadata_i = "", - uint64_t subset_id_i = 0, + uint64_t id_in_subset_i = 0, uint64_t subset_size_i = 0 ): file_id{file_id_i}, file_size{file_size_i}, file_name{std::move(file_name_i)}, user_metadata{std::move(user_metadata_i)}, - subset_id{subset_id_i}, + id_in_subset{id_in_subset_i}, subset_size{subset_size_i} {}; uint64_t file_id = 0; uint64_t file_size = 0; std::string file_name; std::string user_metadata; - uint64_t subset_id = 0; + uint64_t id_in_subset = 0; uint64_t subset_size = 0; }; diff --git a/producer/api/cpp/src/producer_impl.cpp b/producer/api/cpp/src/producer_impl.cpp index 1a384abf7df2916cce0b1909847e14fa10dff12e..24f11fb982dc414d95875096023ff3c0b41ceff1 100644 --- a/producer/api/cpp/src/producer_impl.cpp +++ b/producer/api/cpp/src/producer_impl.cpp @@ -38,9 +38,9 @@ GenericRequestHeader ProducerImpl::GenerateNextSendRequest(const EventHeader& ev uint64_t ingest_mode) { GenericRequestHeader request{kOpcodeTransferData, event_header.file_id, event_header.file_size, event_header.user_metadata.size(), event_header.file_name, substream}; - if (event_header.subset_id != 0) { + if (event_header.id_in_subset != 0) { request.op_code = kOpcodeTransferSubsetData; - request.custom_data[kPosDataSetId] = event_header.subset_id; + request.custom_data[kPosDataSetId] = event_header.id_in_subset; request.custom_data[kPosDataSetSize] = event_header.subset_size; } request.custom_data[kPosIngestMode] = ingest_mode; @@ -80,7 +80,7 @@ Error CheckProducerRequest(const EventHeader& event_header, uint64_t ingest_mode return ProducerErrorTemplates::kWrongInput.Generate("empty filename"); } - if (event_header.subset_id > 0 && event_header.subset_size == 0) { + if (event_header.id_in_subset > 0 && event_header.subset_size == 0) { return ProducerErrorTemplates::kWrongInput.Generate("subset dimensions"); } @@ -100,6 +100,9 @@ Error ProducerImpl::Send(const EventHeader& event_header, bool manage_data_memory) { auto err = CheckProducerRequest(event_header, ingest_mode); if (err) { + if (!manage_data_memory) { + data.release(); + } log__->Error("error checking request - " + err->Explain()); return err; } @@ -230,6 +233,7 @@ Error 
ProducerImpl::SendData__(const EventHeader& event_header, FileData data_wrapped = FileData{(uint8_t*)data}; if (auto err = CheckData(ingest_mode, event_header, &data_wrapped)) { + data_wrapped.release(); return err; } diff --git a/producer/api/python/asapo_producer.pxd b/producer/api/python/asapo_producer.pxd index 26ec51600ede3eeb2f40553f08ffb6bc5551941b..b471855ccb4ce7c832e76efe70c1ff8a96e42fe6 100644 --- a/producer/api/python/asapo_producer.pxd +++ b/producer/api/python/asapo_producer.pxd @@ -65,7 +65,7 @@ cdef extern from "asapo_producer.h" namespace "asapo": uint64_t file_size string file_name string user_metadata - uint64_t subset_id + uint64_t id_in_subset uint64_t subset_size cdef extern from "asapo_producer.h" namespace "asapo": @@ -74,7 +74,7 @@ cdef extern from "asapo_producer.h" namespace "asapo": uint64_t file_size string file_name string user_metadata - uint64_t subset_id + uint64_t id_in_subset uint64_t subset_size cdef extern from "asapo_producer.h" namespace "asapo": diff --git a/producer/api/python/asapo_producer.pyx.in b/producer/api/python/asapo_producer.pyx.in index 1319d9439cf139c4284f29ace8824f03b7d0d94b..1040a6fb22a2389428d6d740973388f382f3c916 100644 --- a/producer/api/python/asapo_producer.pyx.in +++ b/producer/api/python/asapo_producer.pyx.in @@ -125,7 +125,6 @@ cdef class PyProducer: Py_XINCREF(<PyObject*>data) if callback != None: Py_XINCREF(<PyObject*>callback) - return cdef EventHeader create_event_header(self,uint64_t id, exposed_path,user_meta,subset,ingest_mode): cdef EventHeader event_header @@ -133,10 +132,10 @@ cdef class PyProducer: event_header.file_name = _bytes(exposed_path) event_header.user_metadata = _bytes(user_meta) if user_meta!=None else "" if subset == None: - event_header.subset_id = 0 + event_header.id_in_subset = 0 event_header.subset_size = 0 else: - event_header.subset_id = subset[0] + event_header.id_in_subset = subset[0] event_header.subset_size = subset[1] return event_header @@ -163,7 +162,7 @@ cdef class PyProducer: :type data: contiguous numpy or bytes array, can be None for INGEST_MODE_TRANSFER_METADATA_ONLY ingest mode :param user_meta: user metadata, default None :type user_meta: JSON string - :param subset: a tuple with two int values (subset id, subset size), default None + :param subset: a tuple with two int values (id in subset, subset size), default None :type subset: tuple :param substream: substream name, default "default" :type substream: string diff --git a/producer/event_monitor_producer/src/main_eventmon.cpp b/producer/event_monitor_producer/src/main_eventmon.cpp index d2ebf9f0aaa605b8e5b1adae3ca25a2bf2365f4e..dcb2bbc359ab35087c4e08c3b6ec2b1f13cf6441 100644 --- a/producer/event_monitor_producer/src/main_eventmon.cpp +++ b/producer/event_monitor_producer/src/main_eventmon.cpp @@ -82,12 +82,12 @@ void HandleSubsets(asapo::EventHeader* header) { return; case asapo::SubSetMode::kBatch: header->subset_size = GetEventMonConfig()->subset_batch_size; - header->subset_id = (header->file_id - 1) / header->subset_size + 1; + header->id_in_subset = (header->file_id - 1) % header->subset_size + 1; + header->file_id = (header->file_id - 1) / header->subset_size + 1; break; case asapo::SubSetMode::kMultiSource: header->subset_size = GetEventMonConfig()->subset_multisource_nsources; - header->subset_id = header->file_id; - header->file_id = GetEventMonConfig()->subset_multisource_sourceid; + header->id_in_subset = GetEventMonConfig()->subset_multisource_sourceid; break; } } diff --git 
a/receiver/src/request_handler/request_handler_db_check_request.cpp b/receiver/src/request_handler/request_handler_db_check_request.cpp
index 23d88c10f215498c7d37afbe1eb0decb17d117bb..f2d6f0b9df1494e1fda3ab4b347e35ed5eb36774 100644
--- a/receiver/src/request_handler/request_handler_db_check_request.cpp
+++ b/receiver/src/request_handler/request_handler_db_check_request.cpp
@@ -29,10 +29,10 @@ Error RequestHandlerDbCheckRequest::GetRecordFromDb(const Request* request, File
        }
        return err;
    } else {
-        auto subset_id = request->GetCustomData()[1];
-        err = db_client__->GetDataSetById(col_name, subset_id, id, record);
+        auto id_in_set = request->GetCustomData()[1];
+        err = db_client__->GetDataSetById(col_name, id_in_set, id, record);
        if (!err) {
-            log__->Debug(std::string{"get subset record id "} + std::to_string(subset_id) + " from " + col_name + " in " +
+            log__->Debug(std::string{"get subset record id "} + std::to_string(id) + " from " + col_name + " in " +
                         db_name_ + " at " + GetReceiverConfig()->database_uri);
        }
        return err;
diff --git a/receiver/src/request_handler/request_handler_db_write.cpp b/receiver/src/request_handler/request_handler_db_write.cpp
index fdeefa68c4c60d089a984eb7a7f110efcc76ee1b..6477b8fe4eb29cbb853f2b0513831995de0ba0cf 100644
--- a/receiver/src/request_handler/request_handler_db_write.cpp
+++ b/receiver/src/request_handler/request_handler_db_write.cpp
@@ -65,12 +65,13 @@ Error RequestHandlerDbWrite::InsertRecordToDb(const Request* request) const {
                        " at " + GetReceiverConfig()->database_uri);
        }
    } else {
-        auto subset_id = request->GetCustomData()[1];
+        auto subset_id = file_info.id;
+        file_info.id = request->GetCustomData()[1];
        auto subset_size = request->GetCustomData()[2];
        err = db_client__->InsertAsSubset(col_name, file_info, subset_id, subset_size, false);
        if (!err) {
-            log__->Debug(std::string{"insert record as subset id "} + std::to_string(subset_id) + ", id: " +
-                         std::to_string(file_info.id) + " to " + col_name + " in " +
+            log__->Debug(std::string{"insert record as subset id "} + std::to_string(subset_id) + ", id in subset: " +
+                         std::to_string(file_info.id) + " to " + col_name + " in " +
                         db_name_ + " at " + GetReceiverConfig()->database_uri);
        }
diff --git a/tests/automatic/broker/get_last/check_linux.sh b/tests/automatic/broker/get_last/check_linux.sh
index 8bda2b9b4970ef403ff53751ed173bccd9bf49bb..f8d9b6d8c8e413c2d73f1c7d29668adf4620495e 100644
--- a/tests/automatic/broker/get_last/check_linux.sh
+++ b/tests/automatic/broker/get_last/check_linux.sh
@@ -26,21 +26,21 @@ brokerid=`echo $!`
 groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup`
-curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/last?token=$token --stderr -
+curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr -
-curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/last?token=$token --stderr - | grep '"_id":2'
-curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/last?token=$token --stderr - | grep '"_id":2'
+curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":2'
+curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":2'
 echo "db.data_${substream}.insert({"_id":3})" | mongo ${database_name}
-curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/last?token=$token --stderr - | grep '"_id":3'
+curl -v --silent
127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":3' echo "db.data_${substream}.insert({"_id":4})" | mongo ${database_name} -curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/next?token=$token --stderr - | grep '"_id":4' -curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/last?token=$token --stderr - | grep '"_id":4' +curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/next?token=$token --stderr - | grep '"_id":1' +curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":4' #with a new group groupid=`curl -d '' --silent 127.0.0.1:5005/creategroup` curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/next?token=$token --stderr - | grep '"_id":1' -curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/${groupid}/last?token=$token --stderr - | grep '"_id":4' \ No newline at end of file +curl -v --silent 127.0.0.1:5005/database/data/stream/${substream}/0/last?token=$token --stderr - | grep '"_id":4' \ No newline at end of file diff --git a/tests/automatic/broker/get_last/check_windows.bat b/tests/automatic/broker/get_last/check_windows.bat index 2b7895dd0efa01c425a7eedce5f944ef29620bc5..ecfb48830ebb09bf9ed42cd45385db4d8f2b63f5 100644 --- a/tests/automatic/broker/get_last/check_windows.bat +++ b/tests/automatic/broker/get_last/check_windows.bat @@ -17,22 +17,22 @@ C:\Curl\curl.exe -d '' --silent 127.0.0.1:5005/creategroup > groupid set /P groupid=< groupid -C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/last?token=%token% --stderr - | findstr /c:\"_id\":2 || goto :error -C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/last?token=%token% --stderr - | findstr /c:\"_id\":2 || goto :error +C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":2 || goto :error +C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":2 || goto :error echo db.data_default.insert({"_id":3}) | %mongo_exe% %database_name% || goto :error -C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/last?token=%token% --stderr - | findstr /c:\"_id\":3 || goto :error +C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":3 || goto :error echo db.data_default.insert({"_id":4}) | %mongo_exe% %database_name% || goto :error -C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":4 || goto :error -C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/last?token=%token% --stderr - | findstr /c:\"_id\":4 || goto :error +C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1 || goto :error +C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":4 || goto :error C:\Curl\curl.exe -d '' --silent 127.0.0.1:5005/creategroup > groupid set /P groupid=< groupid C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/%groupid%/next?token=%token% --stderr - | findstr /c:\"_id\":1 || goto :error -C:\Curl\curl.exe -v --silent 
127.0.0.1:5005/database/data/stream/default/%groupid%/last?token=%token% --stderr - | findstr /c:\"_id\":4 || goto :error +C:\Curl\curl.exe -v --silent 127.0.0.1:5005/database/data/stream/default/0/last?token=%token% --stderr - | findstr /c:\"_id\":4 || goto :error goto :clean diff --git a/tests/automatic/consumer/consumer_api/check_linux.sh b/tests/automatic/consumer/consumer_api/check_linux.sh index 2a2a9629b82bc4f38bff373c676cb721b7a51e46..2ab95e58eca6f41b64e0b346cadd9ceb85abbb67 100644 --- a/tests/automatic/consumer/consumer_api/check_linux.sh +++ b/tests/automatic/consumer/consumer_api/check_linux.sh @@ -63,6 +63,18 @@ do echo 'db.data_default.insert({"_id":'$i',"size":3,"images":['$images']})' | mongo ${database_name} done +for i in `seq 1 5`; +do + images='' + for j in `seq 1 2`; + do + images="$images,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":1000,"source":'none',"buf_id":0,"meta":{"test":10}}" + done + images=${images#?} + echo 'db.data_incomplete.insert({"_id":'$i',"size":3,"images":['$images']})' | mongo ${database_name} +done + + echo hello1 > 1_1 -$@ 127.0.0.1:8400 $beamtime_id $token_test_run datasets +$@ 127.0.0.1:8400 $beamtime_id $token_test_run dataset diff --git a/tests/automatic/consumer/consumer_api/consumer_api.cpp b/tests/automatic/consumer/consumer_api/consumer_api.cpp index dc1ecce0fcb17335583d4581f10f59870eebbdac..3663163624f9206275efb9189e9009564eda3d7c 100644 --- a/tests/automatic/consumer/consumer_api/consumer_api.cpp +++ b/tests/automatic/consumer/consumer_api/consumer_api.cpp @@ -45,22 +45,21 @@ void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::str M_AssertEq("hello1", std::string(data.get(), data.get() + fi.size)); - err = broker->GetLast(&fi, group_id, nullptr); + err = broker->GetLast(&fi, nullptr); M_AssertTrue(err == nullptr, "GetLast no error"); M_AssertTrue(fi.name == "10", "GetLast filename"); M_AssertTrue(fi.metadata == "{\"test\":10}", "GetLast metadata"); err = broker->GetNext(&fi, group_id, nullptr); - M_AssertTrue(err == asapo::ConsumerErrorTemplates::kEndOfStream, "GetNext2 error"); - auto error_data = static_cast<const asapo::ConsumerErrorData*>(err->GetCustomData()); - M_AssertTrue(error_data->id_max == 10, "GetNext2 id max"); + M_AssertTrue(err == nullptr, "GetNext2 no error"); + M_AssertTrue(fi.name == "2", "GetNext2 filename"); err = broker->SetLastReadMarker(2, group_id); M_AssertTrue(err == nullptr, "SetLastReadMarker no error"); - err = broker->GetById(8, &fi, group_id, nullptr); + err = broker->GetById(8, &fi, nullptr); M_AssertTrue(err == nullptr, "GetById error"); M_AssertTrue(fi.name == "8", "GetById filename"); @@ -69,7 +68,7 @@ void TestSingle(const std::unique_ptr<asapo::DataBroker>& broker, const std::str M_AssertTrue(fi.name == "3", "GetNext After GetById filename"); - err = broker->GetLast(&fi, group_id, nullptr); + err = broker->GetLast(&fi, nullptr); M_AssertTrue(err == nullptr, "GetLast2 no error"); @@ -207,7 +206,7 @@ void TestDataset(const std::unique_ptr<asapo::DataBroker>& broker, const std::st asapo::FileInfo fi; asapo::Error err; - auto dataset = broker->GetNextDataset(group_id, &err); + auto dataset = broker->GetNextDataset(group_id, 0, &err); if (err) { std::cout << err->Explain() << std::endl; } @@ -223,21 +222,56 @@ void TestDataset(const std::unique_ptr<asapo::DataBroker>& broker, const std::st M_AssertEq("hello1", std::string(data.get(), data.get() + dataset.content[0].size)); - dataset = broker->GetLastDataset(group_id, &err); + dataset = broker->GetLastDataset(0, &err); 
 M_AssertTrue(err == nullptr, "GetLast no error");
     M_AssertTrue(dataset.content[0].name == "10_1", "GetLastDataset filename");
     M_AssertTrue(dataset.content[0].metadata == "{\"test\":10}", "GetLastDataset metadata");
-    dataset = broker->GetNextDataset(group_id, &err);
-    M_AssertTrue(err != nullptr, "GetNextDataset2 error");
+    dataset = broker->GetNextDataset(group_id, 0, &err);
+    M_AssertTrue(err == nullptr, "GetNextDataset2 no error");
+    M_AssertTrue(dataset.content[0].name == "2_1", "GetNextDataset2 filename");
-    dataset = broker->GetLastDataset(group_id, &err);
+    dataset = broker->GetLastDataset(0, &err);
     M_AssertTrue(err == nullptr, "GetLastDataset2 no error");
-    dataset = broker->GetDatasetById(8, group_id, &err);
+    dataset = broker->GetDatasetById(8, 0, &err);
     M_AssertTrue(err == nullptr, "GetDatasetById error");
     M_AssertTrue(dataset.content[2].name == "8_3", "GetDatasetById filename");
+// incomplete datasets without min_size
+
+    dataset = broker->GetNextDataset(group_id,"incomplete",0,&err);
+    M_AssertTrue(err == asapo::ConsumerErrorTemplates::kPartialData, "GetNextDataset incomplete error");
+    M_AssertTrue(dataset.content.size() == 2, "GetNextDataset incomplete size");
+    M_AssertTrue(dataset.content[0].name == "1_1", "GetNextDataset incomplete filename");
+    auto err_data = static_cast<const asapo::PartialErrorData*>(err->GetCustomData());
+    M_AssertTrue(err_data->expected_size == 3, "GetNextDataset expected size in error");
+    M_AssertTrue(err_data->id == 1, "GetNextDataset expected id in error");
+    M_AssertTrue(dataset.expected_size == 3, "GetNextDataset expected size");
+    M_AssertTrue(dataset.id == 1, "GetNextDataset expected id");
+
+    dataset = broker->GetLastDataset("incomplete", 0, &err);
+    M_AssertTrue(err == asapo::ConsumerErrorTemplates::kEndOfStream, "GetLastDataset incomplete no data");
+
+    dataset = broker->GetDatasetById(2, "incomplete", 0, &err);
+    M_AssertTrue(err == asapo::ConsumerErrorTemplates::kPartialData, "GetDatasetById incomplete error");
+    M_AssertTrue(dataset.content[0].name == "2_1", "GetDatasetById incomplete filename");
+
+// incomplete datasets with min_size
+
+    dataset = broker->GetNextDataset(group_id,"incomplete",2,&err);
+    M_AssertTrue(err == nullptr, "GetNextDataset incomplete minsize error");
+    M_AssertTrue(dataset.id == 2, "GetNextDataset minsize id");
+
+    dataset = broker->GetLastDataset("incomplete", 2, &err);
+    M_AssertTrue(err == nullptr, "GetLastDataset incomplete minsize error");
+    M_AssertTrue(dataset.id == 5, "GetLastDataset minsize id");
+
+    dataset = broker->GetDatasetById(2, "incomplete", 2, &err);
+    M_AssertTrue(err == nullptr, "GetDatasetById incomplete minsize error");
+    M_AssertTrue(dataset.content[0].name == "2_1", "GetDatasetById incomplete minsize filename");
+
+
 }
 void TestAll(const Args& args) {
diff --git a/tests/automatic/consumer/consumer_api_python/check_linux.sh b/tests/automatic/consumer/consumer_api_python/check_linux.sh
index eefb1258bb5c21d9d8781ec3f2e9bccfcefd1e5b..1f65c06141f909261993b8af5414353f72815fb9 100644
--- a/tests/automatic/consumer/consumer_api_python/check_linux.sh
+++ b/tests/automatic/consumer/consumer_api_python/check_linux.sh
@@ -72,5 +72,17 @@ do
   echo 'db.data_default.insert({"_id":'$i',"size":3,"images":['$images']})' | mongo ${database_name} >/dev/null
 done
+for i in `seq 1 5`;
+do
+    images=''
+    for j in `seq 1 2`;
+    do
+        images="$images,{"_id":$j,"size":6,"name":'${i}_${j}',"timestamp":1000,"source":'none',"buf_id":0,"meta":{"test":10}}"
+    done
+    images=${images#?}
+    echo
'db.data_incomplete.insert({"_id":'$i',"size":3,"images":['$images']})' | mongo ${database_name} +done + + $Python_EXECUTABLE $3/consumer_api.py 127.0.0.1:8400 $source_path $beamtime_id $token_test_run datasets diff --git a/tests/automatic/consumer/consumer_api_python/check_windows.bat b/tests/automatic/consumer/consumer_api_python/check_windows.bat index 97f89652c09257806c8f75b4d203c973bafc7973..5b3f47301867f1442e618a3ab0984fc07bae0336 100644 --- a/tests/automatic/consumer/consumer_api_python/check_windows.bat +++ b/tests/automatic/consumer/consumer_api_python/check_windows.bat @@ -35,6 +35,8 @@ echo db.dropDatabase() | %mongo_exe% %database_name% for /l %%x in (1, 1, 10) do echo db.data_default.insert({"_id":%%x,"size":3,"images":[{"_id":1, "size":6,"name":"%%x_1","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}},{"_id":2, "size":6,"name":"%%x_2","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}},{"_id":3, "size":6,"name":"%%x_3","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}}]}) | %mongo_exe% %database_name% || goto :error +for /l %%x in (1, 1, 5) do echo db.data_incomplete.insert({"_id":%%x,"size":3,"images":[{"_id":1, "size":6,"name":"%%x_1","timestamp":0,"source":"none","buf_id":0,"meta":{"test":10}},{"_id":2, "size":6,"name":"%%x_2","timestamp":1,"source":"none","buf_id":0,"meta":{"test":10}}]}) | %mongo_exe% %database_name% || goto :error + python %3/consumer_api.py 127.0.0.1:8400 %source_path% %beamtime_id% %token_test_run% datasets || goto :error diff --git a/tests/automatic/consumer/consumer_api_python/consumer_api.py b/tests/automatic/consumer/consumer_api_python/consumer_api.py index c7f6f9a448ab58bb731deb2f01d621bcdb4d8eed..f830ac0313838eaf04935f33f9d212bc80c4f517 100644 --- a/tests/automatic/consumer/consumer_api_python/consumer_api.py +++ b/tests/automatic/consumer/consumer_api_python/consumer_api.py @@ -6,100 +6,94 @@ import sys def exit_on_noerr(name): - print (name) + print(name) sys.exit(1) -def assert_metaname(meta,compare,name): - print ("asserting meta for "+name) +def assert_metaname(meta, compare, name): + print("asserting meta for " + name) if meta['name'] != compare: - print ("error at "+name) - print ('meta: ', json.dumps(meta, indent=4, sort_keys=True)) + print("error at " + name) + print('meta: ', json.dumps(meta, indent=4, sort_keys=True)) sys.exit(1) -def assert_usermetadata(meta,name): - print ("asserting usermetadata for "+name) + +def assert_usermetadata(meta, name): + print("asserting usermetadata for " + name) if meta['meta']['test'] != 10: - print ('meta: ', json.dumps(meta, indent=4, sort_keys=True)) - print ("error at "+name) - print ('meta: ', json.dumps(meta, indent=4, sort_keys=True)) + print('meta: ', json.dumps(meta, indent=4, sort_keys=True)) + print("error at " + name) + print('meta: ', json.dumps(meta, indent=4, sort_keys=True)) sys.exit(1) -def assert_eq(val,expected,name): - print ("asserting eq for "+name) +def assert_eq(val, expected, name): + print("asserting eq for " + name) if val != expected: - print ("error at "+name) - print ('val: ', val,' expected: ',expected) + print("error at " + name) + print('val: ', val, ' expected: ', expected) sys.exit(1) -def check_file_transfer_service(broker,group_id): - broker.set_timeout(1000) - data, meta = broker.get_by_id(1, group_id, meta_only=False) - assert_eq(data.tostring().decode("utf-8"),"hello1","check_file_transfer_service ok") - data, meta = broker.get_by_id(1, group_id,"streamfts", meta_only=False) - 
assert_eq(data.tostring().decode("utf-8"),"hello1","check_file_transfer_service with auto size ok") +def check_file_transfer_service(broker, group_id): + broker.set_timeout(1000) + data, meta = broker.get_by_id(1, meta_only=False) + assert_eq(data.tostring().decode("utf-8"), "hello1", "check_file_transfer_service ok") + data, meta = broker.get_by_id(1, "streamfts", meta_only=False) + assert_eq(data.tostring().decode("utf-8"), "hello1", "check_file_transfer_service with auto size ok") -def check_single(broker,group_id): +def check_single(broker, group_id): _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"1","get next1") - assert_usermetadata(meta,"get next1") + assert_metaname(meta, "1", "get next1") + assert_usermetadata(meta, "get next1") broker.set_timeout(1000) data = broker.retrieve_data(meta) - assert_eq(data.tostring().decode("utf-8"),"hello1","retrieve_data data") + assert_eq(data.tostring().decode("utf-8"), "hello1", "retrieve_data data") _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"2","get next2") - assert_usermetadata(meta,"get next2") + assert_metaname(meta, "2", "get next2") + assert_usermetadata(meta, "get next2") - _, meta = broker.get_last(group_id, meta_only=True) - assert_metaname(meta,"5","get last1") - assert_usermetadata(meta,"get last1") + _, meta = broker.get_last(meta_only=True) + assert_metaname(meta, "5", "get last1") + assert_usermetadata(meta, "get last1") try: - broker.get_by_id(30, group_id, meta_only=True) + broker.get_by_id(30, meta_only=True) except asapo_consumer.AsapoEndOfStreamError: pass else: exit_on_noerr("get_by_id no data") + _, meta = broker.get_next(group_id, meta_only=True) + assert_metaname(meta, "3", "get next3") - try: - _, meta = broker.get_next(group_id, meta_only=True) - except asapo_consumer.AsapoEndOfStreamError: - pass - else: - exit_on_noerr("get_next3") size = broker.get_current_size() - assert_eq(size,5,"get_current_size") - + assert_eq(size, 5, "get_current_size") broker.reset_lastread_marker(group_id) _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"1","get next4") - assert_usermetadata(meta,"get next4") - + assert_metaname(meta, "1", "get next4") + assert_usermetadata(meta, "get next4") - _, meta = broker.get_by_id(3, group_id, meta_only=True) - assert_metaname(meta,"3","get get_by_id") - assert_usermetadata(meta,"get get_by_id") + _, meta = broker.get_by_id(3, meta_only=True) + assert_metaname(meta, "3", "get get_by_id") + assert_usermetadata(meta, "get get_by_id") _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"2","get next5") - assert_usermetadata(meta,"get next5") - + assert_metaname(meta, "2", "get next5") + assert_usermetadata(meta, "get next5") broker.set_lastread_marker(4, group_id) _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"5","get next6") - assert_usermetadata(meta,"get next6") + assert_metaname(meta, "5", "get next6") + assert_usermetadata(meta, "get next6") try: broker.get_next("_wrong_group_name", meta_only=True) @@ -110,29 +104,29 @@ def check_single(broker,group_id): exit_on_noerr("should give wrong input error") try: - broker.get_last(group_id, meta_only=False) + broker.get_last(meta_only=False) except asapo_consumer.AsapoLocalIOError as err: print(err) pass else: exit_on_noerr("io error") - _, meta = broker.get_next(group_id,"stream1", meta_only=True) - assert_metaname(meta,"11","get next stream1") + _, meta = broker.get_next(group_id, "stream1", meta_only=True) + 
assert_metaname(meta, "11", "get next stream1") - _, meta = broker.get_next(group_id,"stream2", meta_only=True) - assert_metaname(meta,"21","get next stream2") + _, meta = broker.get_next(group_id, "stream2", meta_only=True) + assert_metaname(meta, "21", "get next stream2") substreams = broker.get_substream_list("") - assert_eq(len(substreams),4,"number of substreams") + assert_eq(len(substreams), 4, "number of substreams") print(substreams) - assert_eq(substreams[0]["name"],"default","substreams_name1") - assert_eq(substreams[1]["name"],"streamfts","substreams_name2") - assert_eq(substreams[2]["name"],"stream1","substreams_name2") - assert_eq(substreams[3]["name"],"stream2","substreams_name3") - assert_eq(substreams[1]["timestampCreated"],1000,"substreams_timestamp2") + assert_eq(substreams[0]["name"], "default", "substreams_name1") + assert_eq(substreams[1]["name"], "streamfts", "substreams_name2") + assert_eq(substreams[2]["name"], "stream1", "substreams_name2") + assert_eq(substreams[3]["name"], "stream2", "substreams_name3") + assert_eq(substreams[1]["timestampCreated"], 1000, "substreams_timestamp2") -#acks + # acks try: id = broker.get_last_acknowledged_tuple_id(group_id) except asapo_consumer.AsapoNoDataError as err: @@ -142,57 +136,55 @@ def check_single(broker,group_id): exit_on_noerr("get_last_acknowledged_tuple_id") nacks = broker.get_unacknowledged_tuple_ids(group_id) - assert_eq(len(nacks),5,"nacks default stream size = 5") + assert_eq(len(nacks), 5, "nacks default stream size = 5") - broker.acknowledge(group_id,1) + broker.acknowledge(group_id, 1) nacks = broker.get_unacknowledged_tuple_ids(group_id) - assert_eq(len(nacks),4,"nacks default stream size = 4") + assert_eq(len(nacks), 4, "nacks default stream size = 4") id = broker.get_last_acknowledged_tuple_id(group_id) - assert_eq(id,1,"last ack default stream id = 1") + assert_eq(id, 1, "last ack default stream id = 1") - broker.acknowledge(group_id,1,"stream1") + broker.acknowledge(group_id, 1, "stream1") nacks = broker.get_unacknowledged_tuple_ids(group_id) - assert_eq(len(nacks),4,"nacks stream1 size = 4 after ack") + assert_eq(len(nacks), 4, "nacks stream1 size = 4 after ack") -# neg acks + # neg acks broker.reset_lastread_marker(group_id) _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"1","get next neg ack before resend") + assert_metaname(meta, "1", "get next neg ack before resend") broker.reset_lastread_marker(group_id) _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"1","get next neg ack with resend") + assert_metaname(meta, "1", "get next neg ack with resend") -#resend + # resend broker.reset_lastread_marker(group_id) - broker.set_resend_nacs(True,0,1) + broker.set_resend_nacs(True, 0, 1) _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"1","get next before resend") + assert_metaname(meta, "1", "get next before resend") _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"1","get next with resend") + assert_metaname(meta, "1", "get next with resend") _, meta = broker.get_next(group_id, meta_only=True) - assert_metaname(meta,"2","get next after resend") - + assert_metaname(meta, "2", "get next after resend") -#images + # images images = broker.query_images("meta.test = 10") - assert_eq(len(images),5,"size of query answer 1") + assert_eq(len(images), 5, "size of query answer 1") for image in images: - assert_usermetadata(image,"query_images") - + assert_usermetadata(image, "query_images") - images = 
broker.query_images("meta.test = 10 AND name='1'") - assert_eq(len(images),1,"size of query answer 2 ") + images = broker.query_images("meta.test = 10 AND name='1'") + assert_eq(len(images), 1, "size of query answer 2 ") for image in images: - assert_usermetadata(image,"query_images") + assert_usermetadata(image, "query_images") images = broker.query_images("meta.test = 11") - assert_eq(len(images),0,"size of query answer 3 ") + assert_eq(len(images), 0, "size of query answer 3 ") try: images = broker.query_images("bla") @@ -201,54 +193,83 @@ def check_single(broker,group_id): else: exit_on_noerr("wrong query") - broker = asapo_consumer.create_server_broker("bla",path, True, beamtime,"",token,1000) + broker = asapo_consumer.create_server_broker("bla", path, True, beamtime, "", token, 1000) try: - broker.get_last(group_id, meta_only=True) + broker.get_last(meta_only=True) except asapo_consumer.AsapoUnavailableServiceError as err: print(err) pass else: exit_on_noerr("AsapoBrokerServersNotFound") -def check_dataset(broker,group_id): - id, metas = broker.get_next_dataset(group_id) - assert_eq(id,1,"get_next_dataset1") - assert_metaname(metas[0],"1_1","get nextdataset1 name1") - assert_metaname(metas[1],"1_2","get nextdataset1 name2") - assert_usermetadata(metas[0],"get nextdataset1 meta") + +def check_dataset(broker, group_id): + res = broker.get_next_dataset(group_id) + assert_eq(res['id'], 1, "get_next_dataset1") + assert_metaname(res['content'][0], "1_1", "get nextdataset1 name1") + assert_metaname(res['content'][1], "1_2", "get nextdataset1 name2") + assert_usermetadata(res['content'][0], "get nextdataset1 meta") broker.set_timeout(1000) - data = broker.retrieve_data(metas[0]) - assert_eq(data.tostring().decode("utf-8"),"hello1","retrieve_data from dataset data") + data = broker.retrieve_data(res['content'][0]) + assert_eq(data.tostring().decode("utf-8"), "hello1", "retrieve_data from dataset data") + + res = broker.get_next_dataset(group_id) + assert_eq(res['id'], 2, "get_next_dataset2") + assert_metaname(res['content'][0], "2_1", "get nextdataset2 name1") + res = broker.get_last_dataset() + assert_eq(res['id'], 10, "get_last_dataset1") + assert_eq(res['expected_size'], 3, "get_last_dataset1 size ") + assert_metaname(res['content'][2], "10_3", "get get_last_dataset1 name3") - id, metas = broker.get_next_dataset(group_id) - assert_eq(id,2,"get_next_dataset2") - assert_metaname(metas[0],"2_1","get nextdataset2 name1") + res = broker.get_next_dataset(group_id) + assert_eq(res['id'], 3, "get_next_dataset3") - id, metas = broker.get_last_dataset(group_id) - assert_eq(id,10,"get_last_dataset1") - assert_metaname(metas[2],"10_3","get get_last_dataset1 name3") + res = broker.get_dataset_by_id(8) + assert_eq(res['id'], 8, "get_dataset_by_id1 id") + assert_metaname(res['content'][2], "8_3", "get get_dataset_by_id1 name3") + # incomplete datesets without min_size given try: - id, metas = broker.get_next_dataset(group_id) - except asapo_consumer.AsapoEndOfStreamError as err: - assert_eq(err.id_max,10,"get_next_dataset3 id_max") + broker.get_next_dataset(group_id, "incomplete") + except asapo_consumer.AsapoPartialDataError as err: + assert_eq(err.partial_data['expected_size'], 3, "get_next_dataset incomplete expected size") + assert_eq(err.partial_data['id'], 1, "get_next_dataset incomplete id") + assert_eq(err.partial_data['content'][0]['name'], '1_1', "get_next_dataset content 1") + assert_eq(err.partial_data['content'][1]['name'], '1_2', "get_next_dataset content 2") pass else: - 
exit_on_noerr("get_next_dataset3 err") + exit_on_noerr("get_next_dataset incomplete err") - id, metas = broker.get_dataset_by_id(8,group_id) - assert_eq(id,8,"get_dataset_by_id1 id") - assert_metaname(metas[2],"8_3","get get_dataset_by_id1 name3") + try: + broker.get_dataset_by_id(2, "incomplete") + except asapo_consumer.AsapoPartialDataError as err: + assert_eq(err.partial_data['expected_size'], 3, "get_next_dataset incomplete expected size") + assert_eq(err.partial_data['id'], 2, "get_next_dataset incomplete id") + assert_eq(err.partial_data['content'][0]['name'], '2_1', "get_next_dataset content 1") + assert_eq(err.partial_data['content'][1]['name'], '2_2', "get_next_dataset content 2") + pass + else: + exit_on_noerr("get_next_dataset incomplete err") try: - id, metas = broker.get_next_dataset(group_id) - except: + broker.get_last_dataset("incomplete") + except asapo_consumer.AsapoEndOfStreamError as err: pass else: - exit_on_noerr("get_next_dataset4 get next6err") + exit_on_noerr("get_last_dataset incomplete err") + # incomplete with min_size given + res = broker.get_next_dataset(group_id, "incomplete", min_size=2) + assert_eq(res['id'], 2, "get_next_dataset incomplete with minsize") + + res = broker.get_last_dataset("incomplete", min_size=2) + assert_eq(res['id'], 5, "get_last_dataset incomplete with minsize") + + res = broker.get_dataset_by_id(2, "incomplete", min_size=1) + assert_eq(res['id'], 2, "get_dataset_by_id incomplete with minsize") + source, path, beamtime, token, mode = sys.argv[1:] @@ -257,15 +278,14 @@ broker_fts = asapo_consumer.create_server_broker(source, path, False, beamtime, group_id = broker.generate_group_id() - group_id_fts = broker_fts.generate_group_id() if mode == "single": - check_single(broker,group_id) - check_file_transfer_service(broker_fts,group_id_fts) + check_single(broker, group_id) + check_file_transfer_service(broker_fts, group_id_fts) if mode == "datasets": - check_dataset(broker,group_id) + check_dataset(broker, group_id) -print ("tests done") +print("tests done") sys.exit(0) diff --git a/tests/automatic/mongo_db/insert_retrieve_dataset/insert_retrieve_dataset_mongodb.cpp b/tests/automatic/mongo_db/insert_retrieve_dataset/insert_retrieve_dataset_mongodb.cpp index d54e63487b1af0a26be2d1170d2a2ca207db81b5..8e04f6e4790e39145b3217d6438b0db8a4104606 100644 --- a/tests/automatic/mongo_db/insert_retrieve_dataset/insert_retrieve_dataset_mongodb.cpp +++ b/tests/automatic/mongo_db/insert_retrieve_dataset/insert_retrieve_dataset_mongodb.cpp @@ -17,8 +17,8 @@ void Assert(const Error& error, const std::string& expect) { } struct Args { - std::string keyword; - int file_id; + std::string keyword; + int file_id; }; Args GetArgs(int argc, char* argv[]) { @@ -64,7 +64,7 @@ int main(int argc, char* argv[]) { if (args.keyword == "OK") { // check retrieve asapo::FileInfo fi_db; - err = db.GetDataSetById("test", subset_id, fi.id, &fi_db); + err = db.GetDataSetById("test", fi.id,subset_id, &fi_db); M_AssertTrue(fi_db == fi, "get record from db"); M_AssertEq(nullptr, err); err = db.GetDataSetById("test", 0, 0, &fi_db); diff --git a/tests/automatic/producer/python_api/producer_api.py b/tests/automatic/producer/python_api/producer_api.py index dd247424c2f3eb74c33f3ec0215d01e9c1dc1bd2..abc8cac732591fada0b7daf10ce9e82921e8d536 100644 --- a/tests/automatic/producer/python_api/producer_api.py +++ b/tests/automatic/producer/python_api/producer_api.py @@ -50,9 +50,9 @@ producer.send_file(10, local_path="./file1", exposed_path="processed/" + stream 
user_meta='{"test_key":"test_val"}', callback=None) # send subsets -producer.send_file(2, local_path="./file1", exposed_path="processed/" + stream + "/" + "file2", subset=(2, 2), +producer.send_file(2, local_path="./file1", exposed_path="processed/" + stream + "/" + "file2", subset=(1, 2), user_meta='{"test_key":"test_val"}', callback=callback) -producer.send_file(3, local_path="./file1", exposed_path="processed/" + stream + "/" + "file3", subset=(2, 2), +producer.send_file(2, local_path="./file1", exposed_path="processed/" + stream + "/" + "file3", subset=(2, 2), user_meta='{"test_key":"test_val"}', callback=callback) # send meta only @@ -91,8 +91,8 @@ else: sys.exit(1) try: - producer.send_file(0, local_path="./not_exist", exposed_path="./whatever", - ingest_mode=asapo_producer.INGEST_MODE_TRANSFER_METADATA_ONLY, callback=callback) + producer.send_data(0, "processed/" + stream + "/" + "file6", b"hello", + ingest_mode=asapo_producer.DEFAULT_INGEST_MODE, callback=callback) except asapo_producer.AsapoWrongInputError as e: print(e) else: diff --git a/tests/manual/performance_broker_receiver/getlast_broker.cpp b/tests/manual/performance_broker_receiver/getlast_broker.cpp index 658af3435661e504e64bad6d4a90f0b40d9adb42..b73a0101671d7ace2adada85d23aa52fdbe1581d 100644 --- a/tests/manual/performance_broker_receiver/getlast_broker.cpp +++ b/tests/manual/performance_broker_receiver/getlast_broker.cpp @@ -81,7 +81,7 @@ std::vector<std::thread> StartThreads(const Args& params, while (std::chrono::duration_cast<std::chrono::milliseconds>(system_clock::now() - start).count() < params.timeout_ms) { if (params.datasets) { - auto dataset = broker->GetLastDataset(group_id, &err); + auto dataset = broker->GetLastDataset(0, &err); if (err == nullptr) { for (auto& fi : dataset.content) { (*nbuf)[i] += fi.buf_id == 0 ? 0 : 1; @@ -89,7 +89,7 @@ std::vector<std::thread> StartThreads(const Args& params, } } } else { - err = broker->GetLast(&fi, group_id, params.read_data ? &data : nullptr); + err = broker->GetLast(&fi, params.read_data ? &data : nullptr); if (err == nullptr) { (*nbuf)[i] += fi.buf_id == 0 ? 0 : 1; if (params.read_data && (*nfiles)[i] < 10 && fi.size < 10) {