Interface LazyDataFrame

Representation of a lazy computation graph / query.

interface LazyDataFrame {
    [inspect](): string;
    [toStringTag]: string;
    cache(): LazyDataFrame;
    clone(): LazyDataFrame;
    collect(opts?: LazyOptions): Promise<pl.DataFrame<any>>;
    collectSync(opts?: LazyOptions): pl.DataFrame<any>;
    get columns(): string[];
    describeOptimizedPlan(opts?: LazyOptions): string;
    describePlan(): string;
    distinct(maintainOrder?: boolean, subset?: ColumnSelection, keep?: "first" | "last"): LazyDataFrame;
    distinct(opts: {
        keep?: "first" | "last";
        maintainOrder?: boolean;
        subset?: ColumnSelection;
    }): LazyDataFrame;
    drop(name: string): LazyDataFrame;
    drop(names: string[]): LazyDataFrame;
    drop(name: string, ...names: string[]): LazyDataFrame;
    dropNulls(column: string): LazyDataFrame;
    dropNulls(columns: string[]): LazyDataFrame;
    dropNulls(...columns: string[]): LazyDataFrame;
    explode(column: ExprOrString): LazyDataFrame;
    explode(columns: ExprOrString[]): LazyDataFrame;
    explode(column: ExprOrString, ...columns: ExprOrString[]): LazyDataFrame;
    fetch(numRows?: number): Promise<pl.DataFrame<any>>;
    fetch(numRows: number, opts: LazyOptions): Promise<pl.DataFrame<any>>;
    fetchSync(numRows?: number): pl.DataFrame<any>;
    fetchSync(numRows: number, opts: LazyOptions): pl.DataFrame<any>;
    fillNull(fillValue: string | number | pl.Expr): LazyDataFrame;
    filter(predicate: string | pl.Expr): LazyDataFrame;
    first(): pl.DataFrame<any>;
    groupBy(by: ColumnsOrExpr, maintainOrder?: boolean): LazyGroupBy;
    groupBy(by: ColumnsOrExpr, opts: {
        maintainOrder: boolean;
    }): LazyGroupBy;
    groupByDynamic(options: {
        by?: ColumnsOrExpr;
        check_sorted?: boolean;
        closed?:
            | "none"
            | "left"
            | "right"
            | "both";
        every: string;
        includeBoundaries?: boolean;
        indexColumn: string;
        offset?: string;
        period?: string;
        start_by: StartBy;
    }): LazyGroupBy;
    groupByRolling(opts: {
        by?: ColumnsOrExpr;
        check_sorted?: boolean;
        closed?:
            | "none"
            | "left"
            | "right"
            | "both";
        indexColumn: ColumnsOrExpr;
        offset?: string;
        period: string;
    }): LazyGroupBy;
    head(length?: number): LazyDataFrame;
    inner(): any;
    join(other: LazyDataFrame, joinOptions: {
        on: ValueOrArray<string | pl.Expr>;
    } & LazyJoinOptions): LazyDataFrame;
    join(other: LazyDataFrame, joinOptions: {
        leftOn: ValueOrArray<string | pl.Expr>;
        rightOn: ValueOrArray<string | pl.Expr>;
    } & LazyJoinOptions): LazyDataFrame;
    join(other: LazyDataFrame, options: {
        allowParallel?: boolean;
        forceParallel?: boolean;
        how: "cross";
        suffix?: string;
    }): LazyDataFrame;
    joinAsof(other: LazyDataFrame, options: {
        allowParallel?: boolean;
        by?: string | string[];
        byLeft?: string | string[];
        byRight?: string | string[];
        forceParallel?: boolean;
        leftOn?: string;
        on?: string;
        rightOn?: string;
        strategy?: "backward" | "forward" | "nearest";
        suffix?: string;
        tolerance?: string | number;
    }): LazyDataFrame;
    last(): LazyDataFrame;
    limit(n?: number): LazyDataFrame;
    max(): LazyDataFrame;
    mean(): LazyDataFrame;
    median(): LazyDataFrame;
    melt(idVars: ColumnSelection, valueVars: ColumnSelection): LazyDataFrame;
    min(): LazyDataFrame;
    quantile(quantile: number): LazyDataFrame;
    rename(mapping: Record<string, string>): LazyDataFrame;
    reverse(): LazyDataFrame;
    select(column: ExprOrString): LazyDataFrame;
    select(columns: ExprOrString[]): LazyDataFrame;
    select(...columns: ExprOrString[]): LazyDataFrame;
    serialize(format: "json" | "bincode"): Buffer;
    shift(periods: number): LazyDataFrame;
    shift(opts: {
        periods: number;
    }): LazyDataFrame;
    shiftAndFill(n: number, fillValue: number): LazyDataFrame;
    shiftAndFill(opts: {
        fillValue: number;
        n: number;
    }): LazyDataFrame;
    sinkCSV(path: string, options?: SinkCsvOptions): void;
    sinkParquet(path: string, options?: SinkParquetOptions): void;
    slice(offset: number, length: number): LazyDataFrame;
    slice(opts: {
        length: number;
        offset: number;
    }): LazyDataFrame;
    sort(by: ColumnsOrExpr, descending?: ValueOrArray<boolean>, nullsLast?: boolean, maintainOrder?: boolean): LazyDataFrame;
    sort(opts: {
        by: ColumnsOrExpr;
        descending?: ValueOrArray<boolean>;
        maintainOrder?: boolean;
        nullsLast?: boolean;
    }): LazyDataFrame;
    std(): LazyDataFrame;
    sum(): LazyDataFrame;
    tail(length?: number): LazyDataFrame;
    toJSON(): string;
    unique(maintainOrder?: boolean, subset?: ColumnSelection, keep?: "first" | "last"): LazyDataFrame;
    unique(opts: {
        keep?: "first" | "last";
        maintainOrder?: boolean;
        subset?: ColumnSelection;
    }): LazyDataFrame;
    unpivot(idVars: ColumnSelection, valueVars: ColumnSelection): LazyDataFrame;
    var(): LazyDataFrame;
    withColumn(expr: pl.Expr): LazyDataFrame;
    withColumnRenamed(existing: string, replacement: string): LazyDataFrame;
    withColumns(exprs: (pl.Expr | pl.Series<any, string>)[]): LazyDataFrame;
    withColumns(...exprs: (pl.Expr | pl.Series<any, string>)[]): LazyDataFrame;
    withRowCount(): LazyDataFrame;
}

Hierarchy

  • Serialize
  • GroupByOps<LazyGroupBy>
    • LazyDataFrame

Properties

[toStringTag]: string

Accessors

  • get columns(): string[]

    Get the column names of the DataFrame.

    Returns string[]

Methods

  • Collect into a DataFrame. Note: use fetch if you want to run this query on the first n rows only. This can be a huge time saver in debugging queries.

    Parameters

    • Optional opts: LazyOptions

    Returns Promise<pl.DataFrame<any>>

    DataFrame
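
    A minimal usage sketch (the frame contents here are illustrative):

    > const lf = pl.DataFrame({ foo: [1, 2, 3] }).lazy().filter(pl.col("foo").gt(1))
    > const df = await lf.collect()    // runs the query asynchronously
    > const df2 = lf.collectSync()     // blocking variant of the same query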

  • A string representation of the optimized query plan.

    Parameters

    • Optional opts: LazyOptions

    Returns string

  • A string representation of the unoptimized query plan.

    Returns string

  • Drop duplicate rows from this DataFrame. Note that this fails if there is a column of type List in the DataFrame.

    Parameters

    • Optional maintainOrder: boolean
    • Optional subset: ColumnSelection

      Subset of columns to consider when dropping duplicates.

    • Optional keep: "first" | "last"

      Which of the duplicate rows to keep.

    Returns LazyDataFrame

    Deprecated since 0.4.0; use unique instead.

  • Parameters

    • opts: {
          keep?: "first" | "last";
          maintainOrder?: boolean;
          subset?: ColumnSelection;
      }
      • Optional keep?: "first" | "last"
      • Optional maintainOrder?: boolean
      • Optional subset?: ColumnSelection

    Returns LazyDataFrame

  • Fetch is like a collect operation, but it limits the number of rows read by every scan.

    Note that fetch does not guarantee the final number of rows in the DataFrame. Filters, joins, and the number of rows available in the scanned file all influence the final row count.

    Parameters

    • Optional numRows: number

      Collect at most this many rows from each data source.

    Returns Promise<pl.DataFrame<any>>

  • Parameters

    • numRows: number
    • opts: LazyOptions

    Returns Promise<pl.DataFrame<any>>
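
    A quick sketch, assuming a local file at the hypothetical path "large.csv":

    > const lf = pl.scanCSV("large.csv").filter(pl.col("x").gt(0))  // hypothetical file and column
    > const preview = await lf.fetch(100)   // each scan reads at most 100 rows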

  • Filter the rows in the DataFrame based on a predicate expression.

    Parameters

    • predicate: string | pl.Expr

      Expression that evaluates to a boolean Series.

    Returns LazyDataFrame

    > const lf = pl.DataFrame({
    >   "foo": [1, 2, 3],
    >   "bar": [6, 7, 8],
    >   "ham": ['a', 'b', 'c']
    > }).lazy()
    > // Filter on one condition
    > lf.filter(pl.col("foo").lt(3)).collect()
    shape: (2, 3)
    ┌─────┬─────┬─────┐
    │ foo ┆ bar ┆ ham │
    │ --- ┆ --- ┆ --- │
    │ i64 ┆ i64 ┆ str │
    ╞═════╪═════╪═════╡
    │ 1   ┆ 6   ┆ a   │
    ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
    │ 2   ┆ 7   ┆ b   │
    └─────┴─────┴─────┘
  • Start a groupby operation.

    Parameters

    • by: ColumnsOrExpr
    • Optional maintainOrder: boolean

    Returns LazyGroupBy

  • Parameters

    • by: ColumnsOrExpr
    • opts: {
          maintainOrder: boolean;
      }
      • maintainOrder: boolean

    Returns LazyGroupBy
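
    A minimal sketch (column names are illustrative):

    > const out = pl.DataFrame({ ham: ["a", "b", "a"], foo: [1, 2, 3] })
    ...   .lazy()
    ...   .groupBy("ham")
    ...   .agg(pl.col("foo").sum().alias("foo_sum"))
    ...   .collectSync()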

  • Groups based on a time value (or index value of type Int32, Int64). Time windows are calculated and rows are assigned to windows. Different from a normal groupBy, a row can be a member of multiple groups. The time/index window can be seen as a rolling window, with a window size determined by dates/times/values instead of slots in the DataFrame.

    A window is defined by:

    • every: interval of the window
    • period: length of the window
    • offset: offset of the window

    The every, period and offset arguments are created with the following string language:

    • 1ns (1 nanosecond)
    • 1us (1 microsecond)
    • 1ms (1 millisecond)
    • 1s (1 second)
    • 1m (1 minute)
    • 1h (1 hour)
    • 1d (1 day)
    • 1w (1 week)
    • 1mo (1 calendar month)
    • 1y (1 calendar year)
    • 1i (1 index count)

    Or combine them: "3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds

    In case of a groupByDynamic on an integer column, the windows are defined by:

    • "1i" # length 1
    • "10i" # length 10

    Parameters

    • options: {
          by?: ColumnsOrExpr;
          check_sorted?: boolean;
          closed?:
              | "none"
              | "left"
              | "right"
              | "both";
          every: string;
          includeBoundaries?: boolean;
          indexColumn: string;
          offset?: string;
          period?: string;
          start_by: StartBy;
      }
      • Optional by?: ColumnsOrExpr
      • Optional check_sorted?: boolean
      • Optional closed?:
            | "none"
            | "left"
            | "right"
            | "both"
      • every: string
      • Optional includeBoundaries?: boolean
      • indexColumn: string
      • Optional offset?: string
      • Optional period?: string
      • start_by: StartBy

    Returns LazyGroupBy
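
    A minimal sketch, reusing the datetime-parsing pattern from the rolling example below; "window" is assumed here to be a valid StartBy value:

    > const df = pl.DataFrame({
    ...   dt: ["2021-01-01 00:00:00", "2021-01-01 12:00:00", "2021-01-02 00:00:00"],
    ...   a: [1, 2, 3],
    ... }).withColumn(pl.col("dt").str.strptime(pl.Datetime))
    > df.lazy()
    ...   .groupByDynamic({ indexColumn: "dt", every: "1d", start_by: "window" })  // "window" assumed valid
    ...   .agg(pl.col("a").sum().alias("sum_a"))
    ...   .collectSync()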

  • Create rolling groups based on a time column (or index value of type Int32, Int64).

    Different from a dynamic groupBy, the windows are determined by the individual values rather than constant intervals. For constant intervals use groupByDynamic.

    The period and offset arguments are created with the following string language:

    • 1ns (1 nanosecond)
    • 1us (1 microsecond)
    • 1ms (1 millisecond)
    • 1s (1 second)
    • 1m (1 minute)
    • 1h (1 hour)
    • 1d (1 day)
    • 1w (1 week)
    • 1mo (1 calendar month)
    • 1y (1 calendar year)
    • 1i (1 index count)

    Or combine them: "3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds

    In case of a groupByRolling on an integer column, the windows are defined by:

    • "1i" # length 1
    • "10i" # length 10

    Parameters

    • opts: {
          by?: ColumnsOrExpr;
          check_sorted?: boolean;
          closed?:
              | "none"
              | "left"
              | "right"
              | "both";
          indexColumn: ColumnsOrExpr;
          offset?: string;
          period: string;
      }
      • Optional by?: ColumnsOrExpr
      • Optional check_sorted?: boolean
      • Optional closed?:
            | "none"
            | "left"
            | "right"
            | "both"
      • indexColumn: ColumnsOrExpr
      • Optional offset?: string
      • period: string

    Returns LazyGroupBy


    > const dates = [
    ...   "2020-01-01 13:45:48",
    ...   "2020-01-01 16:42:13",
    ...   "2020-01-01 16:45:09",
    ...   "2020-01-02 18:12:48",
    ...   "2020-01-03 19:45:32",
    ...   "2020-01-08 23:16:43",
    ... ]
    > const df = pl.DataFrame({ "dt": dates, "a": [3, 7, 5, 9, 2, 1] }).withColumn(
    ...   pl.col("dt").str.strptime(pl.Datetime)
    ... )
    > const out = df.groupByRolling({ indexColumn: "dt", period: "2d" }).agg(
    ...   [
    ...     pl.sum("a").alias("sum_a"),
    ...     pl.max("a").alias("max_a"),
    ...     pl.min("a").alias("min_a"),
    ...   ]
    ... )
    > out["sum_a"].toArray()   // [3, 10, 15, 24, 11, 1]
    > out["max_a"].toArray()   // [3, 7, 7, 9, 9, 1]
    > out["min_a"].toArray()   // [3, 3, 3, 3, 2, 1]
    > out
    shape: (6, 4)
    ┌─────────────────────┬───────┬───────┬───────┐
    │ dt                  ┆ sum_a ┆ max_a ┆ min_a │
    │ ---                 ┆ ---   ┆ ---   ┆ ---   │
    │ datetime[ms]        ┆ i64   ┆ i64   ┆ i64   │
    ╞═════════════════════╪═══════╪═══════╪═══════╡
    │ 2020-01-01 13:45:48 ┆ 3     ┆ 3     ┆ 3     │
    ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
    │ 2020-01-01 16:42:13 ┆ 10    ┆ 7     ┆ 3     │
    ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
    │ 2020-01-01 16:45:09 ┆ 15    ┆ 7     ┆ 3     │
    ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
    │ 2020-01-02 18:12:48 ┆ 24    ┆ 9     ┆ 3     │
    ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
    │ 2020-01-03 19:45:32 ┆ 11    ┆ 9     ┆ 2     │
    ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
    │ 2020-01-08 23:16:43 ┆ 1     ┆ 1     ┆ 1     │
    └─────────────────────┴───────┴───────┴───────┘
  • Gets the first n rows of the DataFrame. You probably don't want to use this!

    Consider using the fetch operation instead. The fetch operation will truly load the first n rows lazily.

    Parameters

    • Optional length: number

    Returns LazyDataFrame
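
    A small usage sketch (lf is any LazyDataFrame):

    > lf.head(5).collectSync()   // plan that keeps only the first 5 rows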

  • SQL-like joins.

    Parameters

    • other: LazyDataFrame
    • joinOptions: { on: ValueOrArray<string | pl.Expr> } & LazyJoinOptions

    Returns LazyDataFrame

    >>> const df = pl.DataFrame({
    >>>   foo: [1, 2, 3],
    >>>   bar: [6.0, 7.0, 8.0],
    >>>   ham: ['a', 'b', 'c'],
    >>> }).lazy()
    >>>
    >>> const otherDF = pl.DataFrame({
    >>>   apple: ['x', 'y', 'z'],
    >>>   ham: ['a', 'b', 'd'],
    >>> }).lazy();
    >>> const result = await df.join(otherDF, { on: 'ham', how: 'inner' }).collect();
    shape: (2, 4)
    ╭─────┬─────┬─────┬───────╮
    │ foo ┆ bar ┆ ham ┆ apple │
    │ --- ┆ --- ┆ --- ┆ ---   │
    │ i64 ┆ f64 ┆ str ┆ str   │
    ╞═════╪═════╪═════╪═══════╡
    │ 1   ┆ 6   ┆ "a" ┆ "x"   │
    ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
    │ 2   ┆ 7   ┆ "b" ┆ "y"   │
    ╰─────┴─────┴─────┴───────╯
  • Parameters

    • other: LazyDataFrame
    • joinOptions: { leftOn: ValueOrArray<string | pl.Expr>; rightOn: ValueOrArray<string | pl.Expr> } & LazyJoinOptions

    Returns LazyDataFrame

  • Parameters

    • other: LazyDataFrame
    • options: {
          allowParallel?: boolean;
          forceParallel?: boolean;
          how: "cross";
          suffix?: string;
      }
      • Optional allowParallel?: boolean
      • Optional forceParallel?: boolean
      • how: "cross"
      • Optional suffix?: string

    Returns LazyDataFrame
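
    A minimal cross-join sketch (frame contents are illustrative):

    > const sizes = pl.DataFrame({ size: ["S", "M"] }).lazy()
    > const colors = pl.DataFrame({ color: ["red", "blue"] }).lazy()
    > const cartesian = await sizes.join(colors, { how: "cross" }).collect()
    > // 4 rows: every size paired with every color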

  • Perform an asof join. This is similar to a left join except that we match on the nearest key rather than equal keys.

    Both DataFrames must be sorted by the asof join key.

    For each row in the left DataFrame:

    • A "backward" search selects the last row in the right DataFrame whose 'on' key is less than or equal to the left's key.

    • A "forward" search selects the first row in the right DataFrame whose 'on' key is greater than or equal to the left's key.

    • A "nearest" search selects the last row in the right DataFrame whose value is nearest to the left's key. String keys are not currently supported for a nearest search.

    The default is "backward".

    Parameters

    • other: LazyDataFrame

      DataFrame to join with.

    • options: {
          allowParallel?: boolean;
          by?: string | string[];
          byLeft?: string | string[];
          byRight?: string | string[];
          forceParallel?: boolean;
          leftOn?: string;
          on?: string;
          rightOn?: string;
          strategy?: "backward" | "forward" | "nearest";
          suffix?: string;
          tolerance?: string | number;
      }
      • Optional allowParallel?: boolean

        Allow the physical plan to optionally evaluate the computation of both DataFrames up to the join in parallel.

      • Optional by?: string | string[]
      • Optional byLeft?: string | string[]

        Join on these columns before performing the asof join.

      • Optional byRight?: string | string[]

        Join on these columns before performing the asof join.

      • Optional forceParallel?: boolean

        Force the physical plan to evaluate the computation of both DataFrames up to the join in parallel.

      • Optional leftOn?: string

        Join column of the left DataFrame.

      • Optional on?: string

        Join column of both DataFrames. If set, leftOn and rightOn should be undefined.

      • Optional rightOn?: string

        Join column of the right DataFrame.

      • Optional strategy?: "backward" | "forward" | "nearest"

        One of {'forward', 'backward', 'nearest'}.

      • Optional suffix?: string

        Suffix to append to columns with a duplicate name.

      • Optional tolerance?: string | number

        Numeric tolerance. If set, the join is only performed when the matched keys are within this distance. If the asof join is done on columns of dtype "Date" or "Datetime", you can use the following string language:

        • 1ns (1 nanosecond)
        • 1us (1 microsecond)
        • 1ms (1 millisecond)
        • 1s (1 second)
        • 1m (1 minute)
        • 1h (1 hour)
        • 1d (1 day)
        • 1w (1 week)
        • 1mo (1 calendar month)
        • 1y (1 calendar year)
        • 1i (1 index count)

        Or combine them:

        • "3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds

    Returns LazyDataFrame

    > const gdp = pl.DataFrame({
    ...   date: [
    ...     new Date('2016-01-01'),
    ...     new Date('2017-01-01'),
    ...     new Date('2018-01-01'),
    ...     new Date('2019-01-01'),
    ...   ], // note record date: Jan 1st (sorted!)
    ...   gdp: [4164, 4411, 4566, 4696],
    ... })
    > const population = pl.DataFrame({
    ...   date: [
    ...     new Date('2016-05-12'),
    ...     new Date('2017-05-12'),
    ...     new Date('2018-05-12'),
    ...     new Date('2019-05-12'),
    ...   ], // note record date: May 12th (sorted!)
    ...   "population": [82.19, 82.66, 83.12, 83.52],
    ... })
    > population.joinAsof(
    ...   gdp,
    ...   { leftOn: "date", rightOn: "date", strategy: "backward" }
    ... )
    shape: (4, 3)
    ┌─────────────────────┬────────────┬──────┐
    │ date                ┆ population ┆ gdp  │
    │ ---                 ┆ ---        ┆ ---  │
    │ datetime[ms]        ┆ f64        ┆ i64  │
    ╞═════════════════════╪════════════╪══════╡
    │ 2016-05-12 00:00:00 ┆ 82.19      ┆ 4164 │
    ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
    │ 2017-05-12 00:00:00 ┆ 82.66      ┆ 4411 │
    ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
    │ 2018-05-12 00:00:00 ┆ 83.12      ┆ 4566 │
    ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
    │ 2019-05-12 00:00:00 ┆ 83.52      ┆ 4696 │
    └─────────────────────┴────────────┴──────┘
  • Serializes this object to the desired format via serde.

    Parameters

    • format: "json" | "bincode"

    Returns Buffer
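
    A small sketch that writes the serialized plan to a hypothetical file path:

    > const fs = require("fs")
    > const buf = lf.serialize("bincode")    // lf is any LazyDataFrame
    > fs.writeFileSync("plan.bincode", buf)  // hypothetical output path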

  • Evaluate the query in streaming mode and write to a CSV file.

    Warning: streaming mode is considered unstable. It may be changed at any point without it being considered a breaking change.

    This allows streaming results that are larger than RAM to be written to disk.

    Parameters

    • path: string

      File path to which the file should be written.

    • Optional options: SinkCsvOptions

    Returns void
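
    A minimal sketch, assuming a hypothetical input file "input.csv" with a numeric column "value":

    > pl.scanCSV("input.csv")            // hypothetical input path
    ...   .filter(pl.col("value").gt(0))
    ...   .sinkCSV("filtered.csv")       // streams the result to disk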

  • Evaluate the query in streaming mode and write to a Parquet file.

    Warning: streaming mode is considered unstable. It may be changed at any point without it being considered a breaking change.

    This allows streaming results that are larger than RAM to be written to disk.

    Parameters

    • path: string

      File path to which the file should be written.

    • Optional options: SinkParquetOptions

    Returns void
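
    The same pattern works for Parquet, again with hypothetical file paths and column names:

    > pl.scanParquet("input.parquet")    // hypothetical input path
    ...   .select("a", "b")
    ...   .sinkParquet("subset.parquet")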

  • Parameters

    • by: ColumnsOrExpr
    • Optional descending: ValueOrArray<boolean>
    • Optional nullsLast: boolean
    • Optional maintainOrder: boolean

    Returns LazyDataFrame

  • Parameters

    • opts: {
          by: ColumnsOrExpr;
          descending?: ValueOrArray<boolean>;
          maintainOrder?: boolean;
          nullsLast?: boolean;
      }
      • by: ColumnsOrExpr
      • Optional descending?: ValueOrArray<boolean>
      • Optional maintainOrder?: boolean
      • Optional nullsLast?: boolean

    Returns LazyDataFrame
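
    A brief sketch (lf is any LazyDataFrame with a "foo" column):

    > lf.sort({ by: "foo", descending: true, nullsLast: true }).collectSync()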

  • Drop duplicate rows from this DataFrame. Note that this fails if there is a column of type List in the DataFrame.

    Parameters

    • Optional maintainOrder: boolean
    • Optional subset: ColumnSelection

      Subset of columns to consider when dropping duplicates.

    • Optional keep: "first" | "last"

      Which of the duplicate rows to keep.

    Returns LazyDataFrame

  • Parameters

    • opts: {
          keep?: "first" | "last";
          maintainOrder?: boolean;
          subset?: ColumnSelection;
      }
      • Optional keep?: "first" | "last"
      • Optional maintainOrder?: boolean
      • Optional subset?: ColumnSelection

    Returns LazyDataFrame
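
    A minimal sketch (frame contents are illustrative):

    > pl.DataFrame({ a: [1, 1, 2], b: ["x", "x", "y"] })
    ...   .lazy()
    ...   .unique({ subset: ["a"], keep: "first", maintainOrder: true })
    ...   .collectSync()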