Jobs
Define API Jobs.
class gcloud.bigquery.job.Compression(name)
    Bases: gcloud.bigquery.job._EnumProperty

    Pseudo-enum for compression properties.

    ALLOWED = ('GZIP', 'NONE')
    GZIP = 'GZIP'
    NONE = 'NONE'
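Members of the pseudo-enums in this module are plain string constants, with ALLOWED listing every accepted value; a minimal sketch (the same pattern applies to the other _EnumProperty subclasses below):

    from gcloud.bigquery.job import Compression

    # Pseudo-enum members are plain strings; ALLOWED enumerates the
    # values the corresponding job property accepts.
    assert Compression.GZIP == 'GZIP'
    assert Compression.GZIP in Compression.ALLOWED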
class gcloud.bigquery.job.CopyJob(name, destination, sources, client)
    Bases: gcloud.bigquery.job._BaseJob

    Asynchronous job: copy data into a table from other tables.

    Parameters:
        - name (string) – the name of the job
        - destination (gcloud.bigquery.table.Table) – table into which data is to be copied
        - sources (list of gcloud.bigquery.table.Table) – tables from which data is to be copied
        - client (gcloud.bigquery.client.Client) – a client which holds credentials and project configuration for the dataset (which requires a project)

    create_disposition
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy.createDisposition
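A minimal sketch of constructing and starting a copy job; the dataset()/table() factories and the job's begin() method are assumed from elsewhere in the library, and all names are hypothetical:

    from gcloud.bigquery.client import Client
    from gcloud.bigquery.job import CopyJob, CreateDisposition

    client = Client(project='my-project')            # hypothetical project
    dataset = client.dataset('my_dataset')
    source = dataset.table('source_table')
    destination = dataset.table('destination_table')

    job = CopyJob('copy-job-1', destination, [source], client)
    job.create_disposition = CreateDisposition.CREATE_IF_NEEDED
    job.begin()  # assumed API for starting the asynchronous job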
class gcloud.bigquery.job.CreateDisposition(name)
    Bases: gcloud.bigquery.job._EnumProperty

    Pseudo-enum for create_disposition properties.

    ALLOWED = ('CREATE_IF_NEEDED', 'CREATE_NEVER')
    CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
    CREATE_NEVER = 'CREATE_NEVER'
class gcloud.bigquery.job.DestinationFormat(name)
    Bases: gcloud.bigquery.job._EnumProperty

    Pseudo-enum for destination_format properties.

    ALLOWED = ('CSV', 'NEWLINE_DELIMITED_JSON', 'AVRO')
    AVRO = 'AVRO'
    CSV = 'CSV'
    NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
class gcloud.bigquery.job.Encoding(name)
    Bases: gcloud.bigquery.job._EnumProperty

    Pseudo-enum for encoding properties.

    ALLOWED = ('UTF-8', 'ISO-8859-1')
    ISO_8859_1 = 'ISO-8859-1'
    UTF_8 = 'UTF-8'
class gcloud.bigquery.job.ExtractTableToStorageJob(name, source, destination_uris, client)
    Bases: gcloud.bigquery.job._BaseJob

    Asynchronous job: extract data from a table into Cloud Storage.

    Parameters:
        - name (string) – the name of the job
        - source (gcloud.bigquery.table.Table) – table from which data is to be extracted
        - destination_uris (list of string) – URIs describing Cloud Storage blobs into which extracted data will be written
        - client (gcloud.bigquery.client.Client) – a client which holds credentials and project configuration for the dataset (which requires a project)

    compression
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract.compression

    destination_format
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract.destinationFormat
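A minimal sketch of extracting a table to gzipped JSON in Cloud Storage; dataset()/table() and begin() are assumed as above, and all names and URIs are hypothetical:

    from gcloud.bigquery.client import Client
    from gcloud.bigquery.job import (
        Compression, DestinationFormat, ExtractTableToStorageJob)

    client = Client(project='my-project')
    source = client.dataset('my_dataset').table('my_table')

    job = ExtractTableToStorageJob(
        'extract-job-1', source, ['gs://my-bucket/export-*.json.gz'], client)
    job.compression = Compression.GZIP
    job.destination_format = DestinationFormat.NEWLINE_DELIMITED_JSON
    job.begin()  # assumed API for starting the asynchronous job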
class gcloud.bigquery.job.LoadTableFromStorageJob(name, destination, source_uris, client, schema=())
    Bases: gcloud.bigquery.job._BaseJob

    Asynchronous job for loading data into a table from Cloud Storage.

    Parameters:
        - name (string) – the name of the job
        - destination (gcloud.bigquery.table.Table) – table into which data is to be loaded
        - source_uris (sequence of string) – URIs of data files to be loaded
        - client (gcloud.bigquery.client.Client) – a client which holds credentials and project configuration for the dataset (which requires a project)
        - schema (list of gcloud.bigquery.table.SchemaField) – the job's schema

    allow_jagged_rows
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowJaggedRows

    allow_quoted_newlines
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowQuotedNewlines

    create_disposition
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.createDisposition

    field_delimiter
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.fieldDelimiter

    ignore_unknown_values
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.ignoreUnknownValues

    input_file_bytes
        Count of bytes loaded from source files.

        Return type: integer, or NoneType
        Returns: the count (None until set from the server)

    input_files
        Count of source files.

        Return type: integer, or NoneType
        Returns: the count (None until set from the server)

    max_bad_records
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.maxBadRecords

    output_bytes
        Count of bytes saved to destination table.

        Return type: integer, or NoneType
        Returns: the count (None until set from the server)

    output_rows
        Count of rows saved to destination table.

        Return type: integer, or NoneType
        Returns: the count (None until set from the server)

    quote_character
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.quote

    schema
        Table's schema.

        Return type: list of SchemaField
        Returns: fields describing the schema

    skip_leading_rows
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.skipLeadingRows
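A minimal sketch of loading a CSV file from Cloud Storage with an explicit schema; the SchemaField constructor arguments and begin() are assumed from elsewhere in the library, and all names and URIs are hypothetical:

    from gcloud.bigquery.client import Client
    from gcloud.bigquery.job import (
        LoadTableFromStorageJob, SourceFormat, WriteDisposition)
    from gcloud.bigquery.table import SchemaField

    client = Client(project='my-project')
    destination = client.dataset('my_dataset').table('my_table')
    schema = [SchemaField('name', 'STRING', mode='REQUIRED'),
              SchemaField('age', 'INTEGER', mode='REQUIRED')]

    job = LoadTableFromStorageJob(
        'load-job-1', destination, ['gs://my-bucket/data.csv'], client,
        schema=schema)
    job.source_format = SourceFormat.CSV
    job.skip_leading_rows = 1                 # skip the CSV header row
    job.write_disposition = WriteDisposition.WRITE_TRUNCATE
    job.begin()  # assumed API for starting the asynchronous job

    # Once the server reports completion, the statistics properties
    # (input_files, input_file_bytes, output_rows, output_bytes) are
    # populated; they remain None until then.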
class gcloud.bigquery.job.QueryPriority(name)
    Bases: gcloud.bigquery.job._EnumProperty

    Pseudo-enum for the RunQueryJob.priority property.

    ALLOWED = ('INTERACTIVE', 'BATCH')
    BATCH = 'BATCH'
    INTERACTIVE = 'INTERACTIVE'
class gcloud.bigquery.job.RunQueryJob(name, query, client)
    Bases: gcloud.bigquery.job._BaseJob

    Asynchronous job: query tables.

    Parameters:
        - name (string) – the name of the job
        - query (string) – SQL query string
        - client (gcloud.bigquery.client.Client) – a client which holds credentials and project configuration for the dataset (which requires a project)

    allow_large_results
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.allowLargeResults

    create_disposition
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.createDisposition

    default_dataset
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset

    destination_table
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.destinationTable

    flatten_results
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.flattenResults

    priority
        See: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.priority
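A minimal sketch of running a batch-priority query; begin() is assumed as above, all names are hypothetical, and note that the underlying API requires an explicit destination table when allow_large_results is set:

    from gcloud.bigquery.client import Client
    from gcloud.bigquery.job import QueryPriority, RunQueryJob

    client = Client(project='my-project')
    query = ('SELECT name, COUNT(*) AS ct '
             'FROM my_dataset.my_table GROUP BY name')

    job = RunQueryJob('query-job-1', query, client)
    job.priority = QueryPriority.BATCH
    job.allow_large_results = True
    # allow_large_results requires naming a destination table:
    job.destination_table = client.dataset('my_dataset').table('results')
    job.begin()  # assumed API for starting the asynchronous job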
class gcloud.bigquery.job.SourceFormat(name)
    Bases: gcloud.bigquery.job._EnumProperty

    Pseudo-enum for source_format properties.

    ALLOWED = ('CSV', 'DATASTORE_BACKUP', 'NEWLINE_DELIMITED_JSON')
    CSV = 'CSV'
    DATASTORE_BACKUP = 'DATASTORE_BACKUP'
    NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
class gcloud.bigquery.job.WriteDisposition(name)
    Bases: gcloud.bigquery.job._EnumProperty

    Pseudo-enum for write_disposition properties.

    ALLOWED = ('WRITE_APPEND', 'WRITE_TRUNCATE', 'WRITE_EMPTY')
    WRITE_APPEND = 'WRITE_APPEND'
    WRITE_EMPTY = 'WRITE_EMPTY'
    WRITE_TRUNCATE = 'WRITE_TRUNCATE'