diff --git a/consumer/api/python/asapo_consumer.pyx.in b/consumer/api/python/asapo_consumer.pyx.in
index 1e54f8b232f01539a6fa63b1c14f8945161a3739..3eec49395a5c3ef355704d099c35cd46a55bbbf3 100644
--- a/consumer/api/python/asapo_consumer.pyx.in
+++ b/consumer/api/python/asapo_consumer.pyx.in
@@ -448,8 +448,10 @@ cdef class PyConsumer:
         if op == "next":
             with nogil:
                 dataset = self.c_consumer.get().GetNextDataset(b_group_id, min_size,b_stream,&err)
-        elif op == "next_available":    
-                dataset = self.c_consumer.get().GetNextDataset(b_group_id, min_size,b_stream,True,&err)                
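+        # "next_available": return the first available message instead of waiting for the next one in order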
+        elif op == "next_available":
+            with nogil:
+                dataset = self.c_consumer.get().GetNextDataset(b_group_id, min_size,b_stream,True,&err)
         elif op == "last" and group_id == "":
             with nogil:
                 dataset = self.c_consumer.get().GetLastDataset(min_size,b_stream, &err)
@@ -468,9 +468,17 @@ cdef class PyConsumer:
         return res
     def get_next_dataset(self, group_id, min_size = 0, stream = "default", ordered=True):
         """ Return dataset, as a list of substream-messages metadata, that corresponds to different
-        substreams of a given dataset. Each metadata is a python dict, that 
+        substreams of a given dataset. Each metadata is a python dict, that
         contains user part (if given) and service part. Corresponding data can be retrieved
-        via `retrieve_data` function. 
+        via `retrieve_data` function.
 
-        Function iterates across stream for given consumer group (create one, if not exists).
+        The function iterates over the stream for the given consumer group (creating the group if it does not exist).
         For each consumer group each message will be returned only once.
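+
+        Example (a sketch; assumes a `consumer` created via `create_consumer`
+        and the `id`/`content` dict layout produced by `_op_dataset`)::
+
+            dataset = consumer.get_next_dataset(group_id, stream="default")
+            print('dataset id:', dataset['id'])
+            for meta in dataset['content']:
+                data = consumer.retrieve_data(meta)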
@@ -492,7 +492,7 @@ cdef class PyConsumer:
         if ordered:
-            return self._op_dataset("next_available",group_id,stream,min_size,0)
+            return self._op_dataset("next",group_id,stream,min_size,0)
         else:
-            return self._op_dataset("next",group_id,stream,min_size,0)           
+            return self._op_dataset("next_available",group_id,stream,min_size,0)
     def get_last_dataset(self, min_size = 0,  stream = "default", group_id = ""):
         return self._op_dataset("last",group_id,stream,min_size,0)
     def get_dataset_by_id(self, uint64_t id, min_size = 0, stream = "default"):
@@ -552,14 +552,12 @@ def create_consumer(server_name,source_path,has_filesystem,beamtime_id,data_sour
       :type source_path: string
-      :param has_filesystem: True if the source_path is accessible for client, otherwise will use file asapo transfer service to get data
+      :param has_filesystem: True if the source_path is accessible for the client, otherwise the ASAPO file transfer service will be used to get data
       :type has_filesystem: bool
-      :param beamline: beamline name, can be "auto" if beamtime_id is given
-      :type beamline: string
+      :param beamtime_id: ID of the beamtime the data belongs to
+      :type beamtime_id: string
       :param data_source: name of the data source that produces data
       :type data_source: string
       :param token: authorization token
       :type token: string
-      :param nthreads: ingest mode flag
-      :type nthreads: int
-      :param timeout_ms: send requests timeout in milliseconds
+      :param timeout_ms: timeout for sending requests, in milliseconds
       :type timeout_ms: int
-      :param instance_id: instance id, can be "auto" (default), will create a combination out of hostname and pid
+      :param instance_id: instance id, can be "auto" (default), in which case a combination of hostname and pid is used
diff --git a/producer/api/python/asapo_producer.pyx.in b/producer/api/python/asapo_producer.pyx.in
index 65fd067f8ca2efbea235f7453982c88b5ead4d10..080976fe180c9bde023f46ecd02d0e5d399335c4 100644
--- a/producer/api/python/asapo_producer.pyx.in
+++ b/producer/api/python/asapo_producer.pyx.in
@@ -472,6 +472,8 @@ def create_producer(endpoint,type,beamtime_id,beamline,data_source,token,nthread
          :type endpoint: string
-         :param type: source type, "raw" to write to "raw" folder in beamline filesystem,"processed" to write to "processed" folder in core filesystem
+         :param type: source type, "raw" to write to the "raw" folder in the beamline filesystem, "processed" to write to the "processed" folder in the core filesystem
          :type type: string
+         :param beamtime_id: ID of the beamtime the data belongs to, can be "auto" if beamline is given
+         :type beamtime_id: string
          :param beamline: beamline name, can be "auto" if beamtime_id is given
          :type beamline: string
          :param data_source: name of the data source that produces data